diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 066cfdc4bfb2e..83041bf1ceac0 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -83,4 +83,6 @@ BWC_VERSION: - "2.11.2" - "2.12.0" - "2.12.1" + - "2.13.0" + - "2.13.1" diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index 9456fbf8b4ca0..df7986b2c2d16 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -1,7 +1,7 @@ name: "Changelog Verifier" on: pull_request: - types: [opened, edited, review_requested, synchronize, reopened, ready_for_review, labeled, unlabeled] + types: [opened, synchronize, reopened, ready_for_review, labeled, unlabeled] jobs: # Enforces the update of a changelog file on every pull request @@ -13,7 +13,8 @@ jobs: with: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ github.event.pull_request.head.sha }} - uses: dangoslen/changelog-enforcer@v3 + id: verify-changelog with: skipLabels: "autocut, skip-changelog" + changeLogPath: 'CHANGELOG.md' diff --git a/.github/workflows/detect-breaking-change.yml b/.github/workflows/detect-breaking-change.yml new file mode 100644 index 0000000000000..1913d070e8c24 --- /dev/null +++ b/.github/workflows/detect-breaking-change.yml @@ -0,0 +1,27 @@ +name: "Detect Breaking Changes" +on: + pull_request + +jobs: + detect-breaking-change: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-java@v4 + with: + distribution: temurin # Temurin is a distribution of Adoptium + java-version: 21 + - uses: gradle/gradle-build-action@v3 + with: + cache-disabled: true + arguments: japicmp + gradle-version: 8.7 + build-root-directory: server + - if: failure() + run: cat server/build/reports/java-compatibility/report.txt + - if: failure() + uses: actions/upload-artifact@v4 + with: + name: java-compatibility-report.html + path: ${{ github.workspace }}/server/build/reports/java-compatibility/report.html + \ No newline at end of file diff --git a/.github/workflows/wrapper.yml b/.github/workflows/wrapper.yml index dcf2a09717d28..cc9fb2d78f919 100644 --- a/.github/workflows/wrapper.yml +++ b/.github/workflows/wrapper.yml @@ -8,4 +8,4 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: gradle/wrapper-validation-action@v2 + - uses: gradle/wrapper-validation-action@v3 diff --git a/CHANGELOG.md b/CHANGELOG.md index 57fd23b3cabec..751ee7354b091 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,24 +12,42 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865)) - Add a counter to node stat (and _cat/shards) api to track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768)) - [Concurrent Segment Search] Disable concurrent segment search for system indices and throttled requests ([#12954](https://github.com/opensearch-project/OpenSearch/pull/12954)) +- [Tiered Caching] Make took time caching policy setting dynamic ([#13063](https://github.com/opensearch-project/OpenSearch/pull/13063)) +- Detect breaking changes on pull requests ([#9044](https://github.com/opensearch-project/OpenSearch/pull/9044)) +- Add cluster primary balance constraint for rebalancing with buffer ([#12656](https://github.com/opensearch-project/OpenSearch/pull/12656)) +- Add support for derived fields to compute field values at query time without indexing 
([#12569](https://github.com/opensearch-project/OpenSearch/pull/12569)) +- Add support for more than one protocol for transport ([#12967](https://github.com/opensearch-project/OpenSearch/pull/12967)) +- Add changes for overriding remote store and replication settings during snapshot restore. ([#11868](https://github.com/opensearch-project/OpenSearch/pull/11868)) +- Reject Resize index requests (i.e., split, shrink, and clone) while DocRep to SegRep migration is in progress ([#12686](https://github.com/opensearch-project/OpenSearch/pull/12686)) +- Add an individual setting of rate limiter for segment replication ([#12959](https://github.com/opensearch-project/OpenSearch/pull/12959)) ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) - Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908)) -- Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#12893](https://github.com/opensearch-project/OpenSearch/pull/12893)) +- Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#12893](https://github.com/opensearch-project/OpenSearch/pull/12893), [#13117](https://github.com/opensearch-project/OpenSearch/pull/13117)) - Bump `netty` from 4.1.107.Final to 4.1.108.Final ([#12924](https://github.com/opensearch-project/OpenSearch/pull/12924)) -- Bump `commons-io:commons-io` from 2.15.1 to 2.16.0 ([#12996](https://github.com/opensearch-project/OpenSearch/pull/12996)) +- Bump `commons-io:commons-io` from 2.15.1 to 2.16.0 ([#12996](https://github.com/opensearch-project/OpenSearch/pull/12996), [#12998](https://github.com/opensearch-project/OpenSearch/pull/12998), [#12999](https://github.com/opensearch-project/OpenSearch/pull/12999)) +- Bump `org.apache.commons:commons-compress` from 1.24.0 to 1.26.1 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) +- Bump `commons-codec:commons-codec` from 1.15 to 1.16.1 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) +- Bump `org.apache.commons:commons-lang3` from 3.13.0 to 3.14.0 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) +- Bump Apache Tika from 2.6.0 to 2.9.2 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) +- Bump `com.gradle.enterprise` from 3.16.2 to 3.17.1 ([#13116](https://github.com/opensearch-project/OpenSearch/pull/13116), [#13191](https://github.com/opensearch-project/OpenSearch/pull/13191)) +- Bump `gradle/wrapper-validation-action` from 2 to 3 ([#13192](https://github.com/opensearch-project/OpenSearch/pull/13192)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) - Improve built-in secure transports support ([#12907](https://github.com/opensearch-project/OpenSearch/pull/12907)) +- Update links to documentation in rest-api-spec ([#13043](https://github.com/opensearch-project/OpenSearch/pull/13043)) +- Refactor globMatch to use simpleMatchWithNormalizedStrings from Regex ([#13104](https://github.com/opensearch-project/OpenSearch/pull/13104)) ### Deprecated ### Removed ### Fixed +- Fix bulk API ignoring ingest pipeline for upsert ([#12883](https://github.com/opensearch-project/OpenSearch/pull/12883)) - Fix issue with feature flags where default value may not be honored ([#12849](https://github.com/opensearch-project/OpenSearch/pull/12849)) +- Implement mark() and markSupported() in class 
FilterStreamInput ([#13098](https://github.com/opensearch-project/OpenSearch/pull/13098)) ### Security diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4a1162cf2558b..bce6ca0d49294 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,18 +1,18 @@ - [Contributing to OpenSearch](#contributing-to-opensearch) - - [First Things First](#first-things-first) - - [Ways to Contribute](#ways-to-contribute) +- [First Things First](#first-things-first) +- [Ways to Contribute](#ways-to-contribute) - [Bug Reports](#bug-reports) - [Feature Requests](#feature-requests) - [Documentation Changes](#documentation-changes) - [Contributing Code](#contributing-code) - - [Developer Certificate of Origin](#developer-certificate-of-origin) - - [Changelog](#changelog) - - [Review Process](#review-process) - - [Troubleshooting Failing Builds](#troubleshooting-failing-builds) +- [Developer Certificate of Origin](#developer-certificate-of-origin) +- [Changelog](#changelog) +- [Review Process](#review-process) + - [Tips for Success](#tips-for-success) # Contributing to OpenSearch -OpenSearch is a community project that is built and maintained by people just like **you**. We're glad you're interested in helping out. There are several different ways you can do it, but before we talk about that, let's talk about how to get started. +OpenSearch is a community project built and maintained by people just like **you**. We're glad you're interested in helping out. There are several different ways you can do it, but before we talk about that, let's talk about how to get started. ## First Things First @@ -30,9 +30,9 @@ Ugh! Bugs! A bug is when software behaves in a way that you didn't expect and the developer didn't intend. To help us understand what's going on, we first want to make sure you're working from the latest version. Please make sure you're testing against the [latest version](https://github.com/opensearch-project/OpenSearch). -Once you've confirmed that the bug still exists in the latest version, you'll want to check to make sure it's not something we already know about on the [open issues GitHub page](https://github.com/opensearch-project/OpenSearch/issues). +Once you've confirmed that the bug still exists in the latest version, you'll want to check that the bug is not something we already know about. A good way to figure this out is to search for your bug on the [open issues GitHub page](https://github.com/opensearch-project/OpenSearch/issues). -If you've upgraded to the latest version and you can't find it in our open issues list, then you'll need to tell us how to reproduce it. To make the behavior as clear as possible, please provided your steps as `curl` commands which we can copy and paste into a terminal to run it locally, for example: +If you've upgraded to the latest version and you can't find it in our open issues list, then you'll need to tell us how to reproduce it. To make the behavior as clear as possible, please provide your steps as `curl` commands which we can copy and paste into a terminal to run them locally, for example: ```sh # delete the index curl -X DELETE localhost:9200/test # insert a document curl -X PUT localhost:9200/test/test/1 -d '{ "title": "test document" }' # this should stop working curl .... ``` -Provide as much information as you can. You may think that the problem lies with your query, when actually it depends on how your data is indexed. 
The easier it is for us to recreate your problem, the faster it is likely to be fixed. It is generally helpful to provide the basic details of your cluster configuration alongside your reproduction steps. ### Feature Requests -If you've thought of a way that OpenSearch could be better, we want to hear about it. We track feature requests using GitHub, so please feel free to open an issue which describes the feature you would like to see, why you need it, and how it should work. +If you've thought of a way that OpenSearch could be better, we want to hear about it. We track feature requests using GitHub, so please feel free to open an issue which describes the feature you would like to see, why you need it, and how it should work. After opening an issue, the fastest way to see your change made is to open a pull request implementing the changes you outlined in your issue. You can learn more about opening a pull request in the [contributing code section](#contributing-code). ### Documentation Changes @@ -146,12 +146,12 @@ Adding in the change is two step process: 2. Update the entry for your change in [`CHANGELOG.md`](CHANGELOG.md) and make sure that you reference the pull request there. ### Where should I put my CHANGELOG entry? -Please review the [branching strategy](https://github.com/opensearch-project/.github/blob/main/RELEASING.md#opensearch-branching) document. The changelog on the `main` branch will contain sections for the _next major_ and _next minor_ releases. Your entry should go into the section it is intended to be released in. In practice, most changes to `main` will be backported to the next minor release so most entries will likely be in that section. +Please review the [branching strategy](https://github.com/opensearch-project/.github/blob/main/RELEASING.md#opensearch-branching) document. The `main` branch carries **two changelog files**: `CHANGELOG.md`, which corresponds to unreleased changes intended for the _next minor_ release, and `CHANGELOG-3.0.md`, which corresponds to unreleased changes intended for the _next major_ release. Your entry should go into the file corresponding to the version it is intended to be released in. In practice, most changes to `main` will be backported to the next minor release so most entries will be in the `CHANGELOG.md` file. The following examples assume the _next major_ release on main is 3.0, the _next minor_ release is 2.5, and the _current_ release is 2.4. -- **Add a new feature to release in next minor:** Add a changelog entry to `[Unreleased 2.x]` on main, then backport to 2.x (including the changelog entry). -- **Introduce a breaking API change to release in next major:** Add a changelog entry to `[Unreleased 3.0]` on main, do not backport. +- **Add a new feature to release in next minor:** Add a changelog entry to `[Unreleased 2.x]` in CHANGELOG.md on main, then backport to 2.x (including the changelog entry). +- **Introduce a breaking API change to release in next major:** Add a changelog entry to `[Unreleased 3.0]` in CHANGELOG-3.0.md on main; do not backport. - **Upgrade a dependency to fix a CVE:** Add a changelog entry to `[Unreleased 2.x]` on main, then backport to 2.x (including the changelog entry), then backport to 2.4 and ensure the changelog entry is added to `[Unreleased 2.4.1]`. ## Review Process @@ -164,13 +164,19 @@ If we accept the PR, a [maintainer](MAINTAINERS.md) will merge your change and u If we reject the PR, we will close the pull request with a comment explaining why. 
This decision isn't always final: if you feel we have misunderstood your intended change or otherwise think that we should reconsider then please continue the conversation with a comment on the PR and we'll do our best to address any further points you raise. -## Troubleshooting Failing Builds +### Tips for Success -The OpenSearch testing framework offers many capabilities but exhibits significant complexity (it does lot of randomization internally to cover as many edge cases and variations as possible). Unfortunately, this posses a challenge by making it harder to discover important issues/bugs in straightforward way and may lead to so called flaky tests - the tests which flip randomly from success to failure without any code changes. +We have a lot of mechanisms to help expedite the path to an accepted PR. Here are some tips for success: +1. *Minimize BWC guarantees*: The first PR review cycle heavily focuses on the public-facing APIs. This is what we have to "guarantee" as non-breaking for [bwc across major versions](./DEVELOPER_GUIDE.md#backwards-compatibility). +2. *Do not copy non-compliant code*: Ensure that code is APLv2 compatible. This means that you have not copied any code from other sources unless that code is also APLv2 compatible. +3. *Utilize feature flags*: Features that are safeguarded behind feature flags are more likely to be merged and backported, as they come with an additional layer of protection. Refer to this [example PR](https://github.com/opensearch-project/OpenSearch/pull/4959) for implementation details. +4. *Use appropriate Java tags*: + - `@opensearch.internal`: Marks internal classes subject to rapid changes. + - `@opensearch.api`: Marks public-facing API classes with backward compatibility guarantees. + - `@opensearch.experimental`: Indicates rapidly changing [experimental code](./DEVELOPER_GUIDE.md#experimental-development). +5. *Employ sandbox for significant core changes*: Any new features or enhancements that make changes to core classes (e.g., search phases, codecs, or specialized Lucene APIs) are more likely to be merged if they are sandboxed. This can only be enabled via the Java CLI (`-Dsandbox.enabled=true`). +6. *Micro-benchmark critical path*: This is a lesser-known mechanism, but if you have critical path changes you're afraid will impact performance (the changes touch the garbage collector, heap, direct memory, or CPU) then including a [microbenchmark](https://github.com/opensearch-project/OpenSearch/tree/main/benchmarks) with your PR (and JFR or flamegraph results in the description) is a *GREAT IDEA* and will help expedite the review process. +7. *Test rigorously*: Ensure thorough testing ([OpenSearchTestCase](./test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java) for unit tests, [OpenSearchIntegTestCase](./test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java) for integration & cluster tests, [OpenSearchRestTestCase](./test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java) for testing REST endpoint interfaces, and yaml tests with [ClientYamlTestSuiteIT](./rest-api-spec/src/yamlRestTest/java/org/opensearch/test/rest/ClientYamlTestSuiteIT.java) for REST integration tests). + +In general, adding more guardrails to your changes increases the likelihood of swift PR acceptance. We can always relax these guardrails in smaller follow-up PRs. Reverting a GA feature is much more difficult. 
Check out the [DEVELOPER_GUIDE](./DEVELOPER_GUIDE.md#submitting-changes) for more useful tips. -If your pull request reports a failing test(s) on one of the checks, please: - - look if there is an existing [issue](https://github.com/opensearch-project/OpenSearch/issues) reported for the test in question - - if not, please make sure this is not caused by your changes, run the failing test(s) locally for some time - - if you are sure the failure is not related, please open a new [bug](https://github.com/opensearch-project/OpenSearch/issues/new?assignees=&labels=bug%2C+untriaged&projects=&template=bug_template.md&title=%5BBUG%5D) with `flaky-test` label - - add a comment referencing the issue(s) or bug report(s) to your pull request explaining the failing build(s) - - as a bonus point, try to contribute by fixing the flaky test(s) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index b582629aa03d0..b41997d7fc813 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -101,15 +101,14 @@ repositories { dependencies { api localGroovy() - - api 'commons-codec:commons-codec:1.16.0' - api 'org.apache.commons:commons-compress:1.25.0' - api 'org.apache.ant:ant:1.10.13' + api "commons-codec:commons-codec:${props.getProperty('commonscodec')}" + api "org.apache.commons:commons-compress:${props.getProperty('commonscompress')}" + api 'org.apache.ant:ant:1.10.14' api 'com.netflix.nebula:gradle-extra-configurations-plugin:10.0.0' api 'com.netflix.nebula:nebula-publishing-plugin:21.0.0' api 'com.netflix.nebula:gradle-info-plugin:12.1.6' api 'org.apache.rat:apache-rat:0.15' - api 'commons-io:commons-io:2.15.1' + api "commons-io:commons-io:${props.getProperty('commonsio')}" api "net.java.dev.jna:jna:5.14.0" api 'com.github.johnrengelman:shadow:8.1.1' api 'org.jdom:jdom2:2.0.6.1' diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 54032032c7ef5..d4d1e5abc8506 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -38,9 +38,10 @@ httpclient = 4.5.14 httpcore = 4.4.16 httpasyncclient = 4.1.5 commonslogging = 1.2 -commonscodec = 1.15 -commonslang = 3.13.0 -commonscompress = 1.24.0 +commonscodec = 1.16.1 +commonslang = 3.14.0 +commonscompress = 1.26.1 +commonsio = 2.16.0 # plugin dependencies aws = 2.20.86 reactivestreams = 1.0.4 diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java index bb705aebd2fcf..b962fa8ff415e 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java @@ -54,15 +54,19 @@ import org.opensearch.action.search.SearchScrollRequest; import org.opensearch.client.core.CountRequest; import org.opensearch.client.core.CountResponse; +import org.opensearch.common.geo.ShapeRelation; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.geometry.Rectangle; +import org.opensearch.index.query.GeoShapeQueryBuilder; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.RangeQueryBuilder; import 
org.opensearch.index.query.ScriptQueryBuilder; import org.opensearch.index.query.TermsQueryBuilder; import org.opensearch.join.aggregations.Children; @@ -102,6 +106,8 @@ import org.opensearch.search.suggest.Suggest; import org.opensearch.search.suggest.SuggestBuilder; import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilder; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; import org.hamcrest.Matchers; import org.junit.Before; @@ -116,6 +122,7 @@ import java.util.concurrent.TimeUnit; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.query.QueryBuilders.geoShapeQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.both; @@ -764,6 +771,228 @@ public void testSearchWithWeirdScriptFields() throws Exception { } } + public void testSearchWithDerivedFields() throws Exception { + // Just testing DerivedField definition from SearchSourceBuilder derivedField() + // We are not testing the full functionality here + Request doc = new Request("PUT", "test/_doc/1"); + doc.setJsonEntity("{\"field\":\"value\"}"); + client().performRequest(doc); + client().performRequest(new Request("POST", "/test/_refresh")); + // Keyword field + { + SearchRequest searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "keyword", new Script("emit(params._source[\"field\"])")) + .fetchField("result") + .query(new TermsQueryBuilder("result", "value")) + ); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchHit searchHit = searchResponse.getHits().getAt(0); + List values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(1, values.size()); + assertEquals("value", values.get(0)); + + // multi valued + searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField( + "result", + "keyword", + new Script("emit(params._source[\"field\"]);emit(params._source[\"field\"] + \"_2\")") + ) + .query(new TermsQueryBuilder("result", "value_2")) + .fetchField("result") + ); + searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + searchHit = searchResponse.getHits().getAt(0); + values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(2, values.size()); + assertEquals("value", values.get(0)); + assertEquals("value_2", values.get(1)); + } + // Boolean field + { + SearchRequest searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "boolean", new Script("emit(((String)params._source[\"field\"]).equals(\"value\"))")) + .query(new TermsQueryBuilder("result", "true")) + .fetchField("result") + ); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchHit searchHit = searchResponse.getHits().getAt(0); + List values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(1, values.size()); + assertEquals(true, values.get(0)); + } + // Long field + { + SearchRequest searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "long", new Script("emit(Long.MAX_VALUE)")) + .query(new 
RangeQueryBuilder("result").from(Long.MAX_VALUE - 1).to(Long.MAX_VALUE)) + .fetchField("result") + ); + + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchHit searchHit = searchResponse.getHits().getAt(0); + List values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(1, values.size()); + assertEquals(Long.MAX_VALUE, values.get(0)); + + // multi-valued + searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "long", new Script("emit(Long.MAX_VALUE); emit(Long.MIN_VALUE);")) + .query(new RangeQueryBuilder("result").from(Long.MIN_VALUE).to(Long.MIN_VALUE + 1)) + .fetchField("result") + ); + + searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + searchHit = searchResponse.getHits().getAt(0); + values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(2, values.size()); + assertEquals(Long.MAX_VALUE, values.get(0)); + assertEquals(Long.MIN_VALUE, values.get(1)); + } + // Double field + { + SearchRequest searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "double", new Script("emit(Double.MAX_VALUE)")) + .query(new RangeQueryBuilder("result").from(Double.MAX_VALUE - 1).to(Double.MAX_VALUE)) + .fetchField("result") + ); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchHit searchHit = searchResponse.getHits().getAt(0); + List values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(1, values.size()); + assertEquals(Double.MAX_VALUE, values.get(0)); + + // multi-valued + searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "double", new Script("emit(Double.MAX_VALUE); emit(Double.MIN_VALUE);")) + .query(new RangeQueryBuilder("result").from(Double.MIN_VALUE).to(Double.MIN_VALUE + 1)) + .fetchField("result") + ); + + searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + searchHit = searchResponse.getHits().getAt(0); + values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(2, values.size()); + assertEquals(Double.MAX_VALUE, values.get(0)); + assertEquals(Double.MIN_VALUE, values.get(1)); + } + // Date field + { + DateTime date1 = new DateTime(1990, 12, 29, 0, 0, DateTimeZone.UTC); + DateTime date2 = new DateTime(1990, 12, 30, 0, 0, DateTimeZone.UTC); + SearchRequest searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "date", new Script("emit(" + date1.getMillis() + "L)")) + .query(new RangeQueryBuilder("result").from(date1.toString()).to(date2.toString())) + .fetchField("result") + ); + + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchHit searchHit = searchResponse.getHits().getAt(0); + List values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(1, values.size()); + assertEquals(date1.toString(), values.get(0)); + + // multi-valued + searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "date", new Script("emit(" + date1.getMillis() + "L); " + "emit(" + date2.getMillis() + "L)")) + 
.query(new RangeQueryBuilder("result").from(date1.toString()).to(date2.toString())) + .fetchField("result") + ); + + searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + searchHit = searchResponse.getHits().getAt(0); + values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(2, values.size()); + assertEquals(date1.toString(), values.get(0)); + assertEquals(date2.toString(), values.get(1)); + } + // Geo field + { + GeoShapeQueryBuilder qb = geoShapeQuery("result", new Rectangle(-35, 35, 35, -35)); + qb.relation(ShapeRelation.INTERSECTS); + SearchRequest searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "geo_point", new Script("emit(10.0, 20.0)")) + .query(qb) + .fetchField("result") + ); + + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchHit searchHit = searchResponse.getHits().getAt(0); + List values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(1, values.size()); + assertEquals(10.0, ((HashMap) values.get(0)).get("lat")); + assertEquals(20.0, ((HashMap) values.get(0)).get("lon")); + + // multi-valued + searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "geo_point", new Script("emit(10.0, 20.0); emit(20.0, 30.0);")) + .query(qb) + .fetchField("result") + ); + + searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + searchHit = searchResponse.getHits().getAt(0); + values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(2, values.size()); + assertEquals(10.0, ((HashMap) values.get(0)).get("lat")); + assertEquals(20.0, ((HashMap) values.get(0)).get("lon")); + assertEquals(20.0, ((HashMap) values.get(1)).get("lat")); + assertEquals(30.0, ((HashMap) values.get(1)).get("lon")); + } + // IP field + { + SearchRequest searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource().derivedField("result", "ip", new Script("emit(\"10.0.0.1\")")).fetchField("result") + ); + + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchHit searchHit = searchResponse.getHits().getAt(0); + List values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(1, values.size()); + assertEquals("10.0.0.1", values.get(0)); + + // multi-valued + searchRequest = new SearchRequest("test").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "ip", new Script("emit(\"10.0.0.1\"); emit(\"10.0.0.2\");")) + .fetchField("result") + ); + + searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + searchHit = searchResponse.getHits().getAt(0); + values = searchHit.getFields().get("result").getValues(); + assertNotNull(values); + assertEquals(2, values.size()); + assertEquals("10.0.0.1", values.get(0)); + assertEquals("10.0.0.2", values.get(1)); + + } + + } + public void testSearchScroll() throws Exception { for (int i = 0; i < 100; i++) { XContentBuilder builder = jsonBuilder().startObject().field("field", i).endObject(); diff --git a/client/rest/licenses/commons-codec-1.15.jar.sha1 b/client/rest/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- 
a/client/rest/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/client/rest/licenses/commons-codec-1.16.1.jar.sha1 b/client/rest/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/client/rest/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/client/sniffer/licenses/commons-codec-1.15.jar.sha1 b/client/sniffer/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- a/client/sniffer/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/client/sniffer/licenses/commons-codec-1.16.1.jar.sha1 b/client/sniffer/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/client/sniffer/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index f40fb1c4b0a9f..446dbaad8466e 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -44,7 +44,7 @@ dependencies { testRuntimeOnly("com.google.guava:guava:${versions.guava}") { transitive = false } - + api "commons-io:commons-io:${versions.commonsio}" implementation "org.apache.commons:commons-compress:${versions.commonscompress}" } @@ -104,5 +104,8 @@ thirdPartyAudit.ignoreMissingClasses( 'org.tukaani.xz.MemoryLimitException', 'org.tukaani.xz.UnsupportedOptionsException', 'org.tukaani.xz.XZ', - 'org.tukaani.xz.XZOutputStream' + 'org.tukaani.xz.XZOutputStream', + 'org.apache.commons.codec.digest.PureJavaCrc32C', + 'org.apache.commons.codec.digest.XXHash32', + 'org.apache.commons.lang3.reflect.FieldUtils' ) diff --git a/distribution/tools/plugin-cli/licenses/commons-compress-1.24.0.jar.sha1 b/distribution/tools/plugin-cli/licenses/commons-compress-1.24.0.jar.sha1 deleted file mode 100644 index 23999d1bfbde4..0000000000000 --- a/distribution/tools/plugin-cli/licenses/commons-compress-1.24.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4b1b5a3d9573b2970fddab236102c0a4d27d35e \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/commons-compress-1.26.1.jar.sha1 b/distribution/tools/plugin-cli/licenses/commons-compress-1.26.1.jar.sha1 new file mode 100644 index 0000000000000..912bda85de18a --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/commons-compress-1.26.1.jar.sha1 @@ -0,0 +1 @@ +44331c1130c370e726a2e1a3e6fba6d2558ef04a \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/commons-io-2.16.0.jar.sha1 b/distribution/tools/plugin-cli/licenses/commons-io-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..6a7b638719fa3 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/commons-io-2.16.0.jar.sha1 @@ -0,0 +1 @@ +27875a7935f1ddcc13267eb6fae1f719e0409572 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/commons-io-LICENSE.txt b/distribution/tools/plugin-cli/licenses/commons-io-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/commons-io-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + 
http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/distribution/tools/plugin-cli/licenses/commons-io-NOTICE.txt b/distribution/tools/plugin-cli/licenses/commons-io-NOTICE.txt new file mode 100644 index 0000000000000..a6b77d1eb6089 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/commons-io-NOTICE.txt @@ -0,0 +1,5 @@ +Apache Commons IO +Copyright 2002-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java b/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java index e1ad55fe4b60b..470ab75b7930c 100644 --- a/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java +++ b/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java @@ -332,13 +332,20 @@ private void checkComment(Element element) { // Ignore classes annotated with @Generated and all enclosed elements in them. private boolean isGenerated(Element element) { - return element + final boolean isGenerated = element .getAnnotationMirrors() .stream() .anyMatch(m -> m .getAnnotationType() .toString() /* ClassSymbol.toString() returns class name */ .equalsIgnoreCase("javax.annotation.Generated")); + + if (!isGenerated && element.getEnclosingElement() != null) { + // check if enclosing element is generated + return isGenerated(element.getEnclosingElement()); + } + + return isGenerated; } private boolean hasInheritedJavadocs(Element element) { diff --git a/libs/common/src/main/java/org/opensearch/common/Glob.java b/libs/common/src/main/java/org/opensearch/common/Glob.java index daf045dd49e3a..b390a3ca84182 100644 --- a/libs/common/src/main/java/org/opensearch/common/Glob.java +++ b/libs/common/src/main/java/org/opensearch/common/Glob.java @@ -52,34 +52,35 @@ public static boolean globMatch(String pattern, String str) { if (pattern == null || str == null) { return false; } - int firstIndex = pattern.indexOf('*'); - if (firstIndex == -1) { - return pattern.equals(str); - } - if (firstIndex == 0) { - if (pattern.length() == 1) { - return true; - } - int nextIndex = pattern.indexOf('*', firstIndex + 1); - if (nextIndex == -1) { - return str.endsWith(pattern.substring(1)); - } else if (nextIndex == 1) { - // Double wildcard "**" - skipping the first "*" - return globMatch(pattern.substring(1), str); + int sIdx = 0, pIdx = 0, match = 0, wildcardIdx = -1; + while (sIdx < str.length()) { + // both chars matching, incrementing both pointers + if (pIdx < pattern.length() && str.charAt(sIdx) == pattern.charAt(pIdx)) { + sIdx++; + pIdx++; + } else if (pIdx < pattern.length() && pattern.charAt(pIdx) == '*') { + // wildcard found, only incrementing pattern pointer + wildcardIdx = pIdx; + match = sIdx; + pIdx++; + } else if (wildcardIdx != -1) { + // last pattern pointer was a wildcard, incrementing string pointer + pIdx = wildcardIdx + 1; + match++; + sIdx = match; + } else { + // current pattern pointer is not a wildcard, last pattern pointer was also not a wildcard + // characters do not match + return false; 
} - String part = pattern.substring(1, nextIndex); - int partIndex = str.indexOf(part); - while (partIndex != -1) { - if (globMatch(pattern.substring(nextIndex), str.substring(partIndex + part.length()))) { - return true; - } - partIndex = str.indexOf(part, partIndex + 1); - } - return false; } - return (str.length() >= firstIndex - && pattern.substring(0, firstIndex).equals(str.substring(0, firstIndex)) - && globMatch(pattern.substring(firstIndex), str.substring(firstIndex))); + + // check for remaining characters in pattern + while (pIdx < pattern.length() && pattern.charAt(pIdx) == '*') { + pIdx++; + } + + return pIdx == pattern.length(); } } diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 1f88f518101c6..3c9db60f7be8f 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -127,6 +127,7 @@ public class Version implements Comparable<Version>, ToXContentFragment { public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_9_2); public static final Version V_2_12_1 = new Version(2120199, org.apache.lucene.util.Version.LUCENE_9_9_2); public static final Version V_2_13_0 = new Version(2130099, org.apache.lucene.util.Version.LUCENE_9_10_0); + public static final Version V_2_13_1 = new Version(2130199, org.apache.lucene.util.Version.LUCENE_9_10_0); public static final Version V_2_14_0 = new Version(2140099, org.apache.lucene.util.Version.LUCENE_9_10_0); public static final Version CURRENT = V_2_14_0; diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java index a6e49567ac7d5..ee67fd4f271a2 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java @@ -80,6 +80,16 @@ public void reset() throws IOException { delegate.reset(); } + @Override + public void mark(int readlimit) { + delegate.mark(readlimit); + } + + @Override + public boolean markSupported() { + return delegate.markSupported(); + } + @Override public int read() throws IOException { return delegate.read(); diff --git a/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java b/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java index a044586e095e3..ab6dfbc2feb25 100644 --- a/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java +++ b/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java @@ -12,6 +12,9 @@ import org.opensearch.core.common.bytes.BytesReference; import java.io.IOException; +import java.nio.ByteBuffer; + +import static org.hamcrest.Matchers.is; /** test the FilterStreamInput using the same BaseStreamTests */ public class FilterStreamInputTests extends BaseStreamTests { @@ -21,4 +24,24 @@ protected StreamInput getStreamInput(BytesReference bytesReference) throws IOExc return new FilterStreamInput(StreamInput.wrap(br.bytes, br.offset, br.length)) { }; } + + public void testMarkAndReset() throws IOException { + FilterStreamInputTests filterStreamInputTests = new FilterStreamInputTests(); + + ByteBuffer buffer = ByteBuffer.wrap(new byte[20]); + for (int i = 0; i < buffer.limit(); i++) { + buffer.put((byte) i); + } + buffer.rewind(); + BytesReference bytesReference 
= BytesReference.fromByteBuffer(buffer); + StreamInput streamInput = filterStreamInputTests.getStreamInput(bytesReference); + streamInput.read(); + assertThat(streamInput.markSupported(), is(true)); + streamInput.mark(-1); + int int1 = streamInput.read(); + int int2 = streamInput.read(); + streamInput.reset(); + assertEquals(int1, streamInput.read()); + assertEquals(int2, streamInput.read()); + } } diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java index 3d5aa36caf171..069cb4cd3889f 100644 --- a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java @@ -79,8 +79,8 @@ public class XContentParserTests extends OpenSearchTestCase { () -> randomAlphaOfLengthBetween(1, SmileXContent.DEFAULT_MAX_STRING_LEN / 10), /* limit to ~200Mb */ /* YAML parser limitation */ XContentType.YAML, - /* use 75% of the limit, difficult to get the exact size of the content right */ - () -> randomRealisticUnicodeOfCodepointLengthBetween(1, (int) (YamlXContent.DEFAULT_CODEPOINT_LIMIT * 0.75)) + /* use 50% of the limit, difficult to get the exact size of the content right */ + () -> randomRealisticUnicodeOfCodepointLengthBetween(1, (int) (YamlXContent.DEFAULT_CODEPOINT_LIMIT * 0.50)) ); private static final Map> FIELD_NAME_GENERATORS = Map.of( diff --git a/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java b/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java index 568ac4d188c51..977a66c53b7e8 100644 --- a/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java +++ b/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java @@ -12,21 +12,31 @@ import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; +import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.cache.CacheType; import org.opensearch.common.cache.ICache; import org.opensearch.common.cache.settings.CacheSettings; import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.cache.request.RequestCacheStats; +import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.IndicesRequestCache; import org.opensearch.plugins.CachePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.PluginInfo; import 
org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.hamcrest.OpenSearchAssertions; import org.junit.Assert; import java.time.ZoneId; @@ -34,15 +44,20 @@ import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES_KEY; +import static org.opensearch.indices.IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.greaterThan; +@OpenSearchIntegTestCase.ClusterScope(numDataNodes = 0, scope = OpenSearchIntegTestCase.Scope.TEST) public class TieredSpilloverCacheIT extends OpenSearchIntegTestCase { @Override @@ -50,15 +65,9 @@ protected Collection> nodePlugins() { return Arrays.asList(TieredSpilloverCachePlugin.class, MockDiskCachePlugin.class); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.PLUGGABLE_CACHE, "true").build(); - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { + private Settings defaultSettings(String onHeapCacheSizeInBytesOrPecentage) { return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) + .put(FeatureFlags.PLUGGABLE_CACHE, "true") .put( CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME @@ -75,10 +84,17 @@ protected Settings nodeSettings(int nodeOrdinal) { ).getKey(), MockDiskCache.MockDiskCacheFactory.NAME ) + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSizeInBytesOrPecentage + ) .build(); } public void testPluginsAreInstalled() { + internalCluster().startNode(Settings.builder().put(defaultSettings("1%")).build()); NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); nodesInfoRequest.addMetric(NodesInfoRequest.Metric.PLUGINS.metricName()); NodesInfoResponse nodesInfoResponse = OpenSearchIntegTestCase.client().admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); @@ -90,11 +106,12 @@ public void testPluginsAreInstalled() { .collect(Collectors.toList()); Assert.assertTrue( pluginInfos.stream() - .anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.cache.common" + ".tier.TieredSpilloverCachePlugin")) + .anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.cache.common.tier.TieredSpilloverCachePlugin")) ); } public void testSanityChecksWithIndicesRequestCache() throws InterruptedException { + internalCluster().startNodes(3, Settings.builder().put(defaultSettings("1%")).build()); Client client = client(); assertAcked( client.admin() @@ -118,10 +135,7 @@ public void testSanityChecksWithIndicesRequestCache() throws InterruptedExceptio .setSize(0) .setSearchType(SearchType.QUERY_THEN_FETCH) .addAggregation( - dateHistogram("histo").field("f") - .timeZone(ZoneId.of("+01:00")) 
- .minDocCount(0) - .dateHistogramInterval(DateHistogramInterval.MONTH) + dateHistogram("histo").field("f").timeZone(ZoneId.of("+01:00")).minDocCount(0).calendarInterval(DateHistogramInterval.MONTH) ) .get(); assertSearchResponse(r1); @@ -133,6 +147,288 @@ public void testSanityChecksWithIndicesRequestCache() throws InterruptedExceptio ); } + public void testWithDynamicTookTimePolicy() throws Exception { + int onHeapCacheSizeInBytes = 2000; + internalCluster().startNode(Settings.builder().put(defaultSettings(onHeapCacheSizeInBytes + "b")).build()); + Client client = client(); + assertAcked( + client.admin() + .indices() + .prepareCreate("index") + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", -1) + ) + .get() + ); + // Step 1: Set a very high value for the took time policy so that no items evicted from the onHeap cache are spilled + // to disk. Then issue requests so that a few items get cached. + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( + Settings.builder() + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(100, TimeUnit.SECONDS) + ) + .build() + ); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).get()); + int numberOfIndexedItems = randomIntBetween(6, 10); + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + indexRandom(true, client.prepareIndex("index").setSource("k" + iterator, "hello" + iterator)); + } + ensureSearchable("index"); + refreshAndWaitForReplication(); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); + OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); + long perQuerySizeInCacheInBytes = -1; + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + SearchResponse resp = client.prepareSearch("index") + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator)) + .get(); + if (perQuerySizeInCacheInBytes == -1) { + RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index"); + perQuerySizeInCacheInBytes = requestCacheStats.getMemorySizeInBytes(); + } + assertSearchResponse(resp); + } + RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index"); + // Considering the disk cache won't be used due to the took time policy having a high value, we expect the overall cache + // size to be less than or equal to the onHeapCache size. + assertTrue(requestCacheStats.getMemorySizeInBytes() <= onHeapCacheSizeInBytes); + long entriesInCache = requestCacheStats.getMemorySizeInBytes() / perQuerySizeInCacheInBytes; + // All should be misses in the first attempt + assertEquals(numberOfIndexedItems, requestCacheStats.getMissCount()); + assertEquals(numberOfIndexedItems - entriesInCache, requestCacheStats.getEvictions()); + assertEquals(0, requestCacheStats.getHitCount()); + + // Step 2: Hit the same set of queries as above again; we still won't see any hits as items keep getting evicted.
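These steps work because the threshold is now a dynamic setting: the TookTimePolicy constructor registers a consumer with ClusterSettings, so the transient-settings updates in Steps 1 and 3 retune the policy on live nodes without a restart. A minimal sketch of that pattern follows; the holder class and its names are hypothetical, and volatile is added here for cross-thread visibility (the policy in this change uses a plain field):

import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Setting;
import org.opensearch.common.unit.TimeValue;

class DynamicTookTimeThreshold {
    private volatile TimeValue threshold;

    DynamicTookTimeThreshold(TimeValue initial, ClusterSettings clusterSettings, Setting<TimeValue> setting) {
        this.threshold = initial;
        // Invoked on every accepted cluster-settings update for `setting`.
        clusterSettings.addSettingsUpdateConsumer(setting, updated -> this.threshold = updated);
    }

    boolean accepts(TimeValue tookTime) {
        // Admit a result into the disk tier only if it took at least as long as the threshold.
        return tookTime.compareTo(threshold) >= 0;
    }
}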
+ for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + SearchResponse resp = client.prepareSearch("index") + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator)) + .get(); + assertSearchResponse(resp); + } + requestCacheStats = getRequestCacheStats(client, "index"); + // We still won't get any hits as items keep getting evicted in LRU fashion due to limited cache size. + assertTrue(requestCacheStats.getMemorySizeInBytes() <= onHeapCacheSizeInBytes); + assertEquals(numberOfIndexedItems * 2, requestCacheStats.getMissCount()); + assertEquals(numberOfIndexedItems * 2 - entriesInCache, requestCacheStats.getEvictions()); + assertEquals(0, requestCacheStats.getHitCount()); + long lastEvictionSeen = requestCacheStats.getEvictions(); + + // Step 3: Decrease took time policy to zero so that disk cache also comes into play. Now we should be able + // to cache all entries. + updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( + Settings.builder() + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(0, TimeUnit.MILLISECONDS) + ) + .build() + ); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).get()); + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + SearchResponse resp = client.prepareSearch("index") + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator)) + .get(); + assertSearchResponse(resp); + } + requestCacheStats = getRequestCacheStats(client, "index"); + // All entries should get cached. + assertEquals(numberOfIndexedItems * perQuerySizeInCacheInBytes, requestCacheStats.getMemorySizeInBytes()); + // No more evictions seen when compared with last step. + assertEquals(0, requestCacheStats.getEvictions() - lastEvictionSeen); + // Hit count should be equal to number of cache entries present in previous step. + assertEquals(entriesInCache, requestCacheStats.getHitCount()); + assertEquals(numberOfIndexedItems * 3 - entriesInCache, requestCacheStats.getMissCount()); + long lastHitCountSeen = requestCacheStats.getHitCount(); + long lastMissCountSeen = requestCacheStats.getMissCount(); + + // Step 4: Again hit the same requests, we should get hits for all entries. + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + SearchResponse resp = client.prepareSearch("index") + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator)) + .get(); + assertSearchResponse(resp); + } + requestCacheStats = getRequestCacheStats(client, "index"); + // All entries should get cached. + assertEquals(numberOfIndexedItems * perQuerySizeInCacheInBytes, requestCacheStats.getMemorySizeInBytes()); + // No more evictions seen when compared with last step. 
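As a cross-check, the counters asserted across the four steps reduce to a small ledger, with n = numberOfIndexedItems and e = entriesInCache (illustrative; it assumes no background refreshes or merges disturb the cache):

// Step 1 (threshold 100s, heap only):  misses = n,      hits = 0,     evictions = n - e
// Step 2 (same queries, heap only):    misses = 2n,     hits = 0,     evictions = 2n - e
// Step 3 (threshold 0, disk in play):  misses = 3n - e, hits = e,     evictions unchanged
// Step 4 (same queries, all cached):   misses = 3n - e, hits = e + n, evictions unchanged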
+ assertEquals(0, requestCacheStats.getEvictions() - lastEvictionSeen); + assertEquals(lastHitCountSeen + numberOfIndexedItems, requestCacheStats.getHitCount()); + assertEquals(0, lastMissCountSeen - requestCacheStats.getMissCount()); + } + + public void testInvalidationWithIndicesRequestCache() throws Exception { + int onHeapCacheSizeInBytes = 2000; + internalCluster().startNode( + Settings.builder() + .put(defaultSettings(onHeapCacheSizeInBytes + "b")) + .put(INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), new TimeValue(1)) + .build() + ); + Client client = client(); + assertAcked( + client.admin() + .indices() + .prepareCreate("index") + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", -1) + ) + .get() + ); + // Update took time policy to zero so that all entries are eligible to be cached on disk. + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( + Settings.builder() + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(0, TimeUnit.MILLISECONDS) + ) + .build() + ); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).get()); + int numberOfIndexedItems = randomIntBetween(5, 10); + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + indexRandom(true, client.prepareIndex("index").setSource("k" + iterator, "hello" + iterator)); + } + ensureSearchable("index"); + refreshAndWaitForReplication(); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); + OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); + long perQuerySizeInCacheInBytes = -1; + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + SearchResponse resp = client.prepareSearch("index") + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator)) + .get(); + if (perQuerySizeInCacheInBytes == -1) { + RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index"); + perQuerySizeInCacheInBytes = requestCacheStats.getMemorySizeInBytes(); + } + assertSearchResponse(resp); + } + RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index"); + assertEquals(numberOfIndexedItems, requestCacheStats.getMissCount()); + assertEquals(0, requestCacheStats.getHitCount()); + assertEquals(0, requestCacheStats.getEvictions()); + assertEquals(perQuerySizeInCacheInBytes * numberOfIndexedItems, requestCacheStats.getMemorySizeInBytes()); + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + SearchResponse resp = client.prepareSearch("index") + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator)) + .get(); + assertSearchResponse(resp); + } + requestCacheStats = client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache(); + assertEquals(numberOfIndexedItems, requestCacheStats.getHitCount()); + assertEquals(numberOfIndexedItems, requestCacheStats.getMissCount()); + assertEquals(perQuerySizeInCacheInBytes * numberOfIndexedItems, 
requestCacheStats.getMemorySizeInBytes()); + assertEquals(0, requestCacheStats.getEvictions()); + // Explicit refresh would invalidate cache entries. + refreshAndWaitForReplication(); + assertBusy(() -> { + // Explicit refresh should clear up cache entries + assertTrue(getRequestCacheStats(client, "index").getMemorySizeInBytes() == 0); + }, 1, TimeUnit.SECONDS); + requestCacheStats = client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache(); + assertEquals(0, requestCacheStats.getMemorySizeInBytes()); + // Hits and misses stats shouldn't get cleared up. + assertEquals(numberOfIndexedItems, requestCacheStats.getHitCount()); + assertEquals(numberOfIndexedItems, requestCacheStats.getMissCount()); + } + + public void testWithExplicitCacheClear() throws Exception { + int onHeapCacheSizeInBytes = 2000; + internalCluster().startNode( + Settings.builder() + .put(defaultSettings(onHeapCacheSizeInBytes + "b")) + .put(INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), new TimeValue(1)) + .build() + ); + Client client = client(); + assertAcked( + client.admin() + .indices() + .prepareCreate("index") + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", -1) + ) + .get() + ); + // Update took time policy to zero so that all entries are eligible to be cached on disk. + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( + Settings.builder() + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(0, TimeUnit.MILLISECONDS) + ) + .build() + ); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).get()); + int numberOfIndexedItems = randomIntBetween(5, 10); + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + indexRandom(true, client.prepareIndex("index").setSource("k" + iterator, "hello" + iterator)); + } + ensureSearchable("index"); + refreshAndWaitForReplication(); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); + OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); + + long perQuerySizeInCacheInBytes = -1; + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + SearchResponse resp = client.prepareSearch("index") + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator)) + .get(); + if (perQuerySizeInCacheInBytes == -1) { + RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index"); + perQuerySizeInCacheInBytes = requestCacheStats.getMemorySizeInBytes(); + } + assertSearchResponse(resp); + } + RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index"); + assertEquals(numberOfIndexedItems, requestCacheStats.getMissCount()); + assertEquals(0, requestCacheStats.getHitCount()); + assertEquals(0, requestCacheStats.getEvictions()); + assertEquals(perQuerySizeInCacheInBytes * numberOfIndexedItems, requestCacheStats.getMemorySizeInBytes()); + + // Explicitly clear the cache.
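A note on timing: clearing or refreshing only marks request-cache entries stale; memory is reclaimed by the periodic cache cleaner. That is why these tests start the node with a 1 ms clean interval and poll the stats with assertBusy rather than reading them immediately, as in the node setup used above:

// Run the request-cache cleaner every millisecond so invalidated entries are
// reaped well within assertBusy's one-second window.
Settings nodeSettings = Settings.builder()
    .put(defaultSettings(onHeapCacheSizeInBytes + "b"))
    .put(INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), new TimeValue(1)) // 1 ms
    .build();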
+ ClearIndicesCacheRequest request = new ClearIndicesCacheRequest("index"); + ClearIndicesCacheResponse response = client.admin().indices().clearCache(request).get(); + assertNoFailures(response); + + assertBusy(() -> { + // All entries should get cleared up. + assertTrue(getRequestCacheStats(client, "index").getMemorySizeInBytes() == 0); + }, 1, TimeUnit.SECONDS); + } + + private RequestCacheStats getRequestCacheStats(Client client, String indexName) { + return client.admin().indices().prepareStats(indexName).setRequestCache(true).get().getTotal().getRequestCache(); + } + public static class MockDiskCachePlugin extends Plugin implements CachePlugin { public MockDiskCachePlugin() {} diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/TookTimePolicy.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/TookTimePolicy.java index 96ef027c17187..4bc26803acf4c 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/TookTimePolicy.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/policy/TookTimePolicy.java @@ -13,12 +13,16 @@ package org.opensearch.cache.common.policy; +import org.opensearch.common.cache.CacheType; import org.opensearch.common.cache.policy.CachedQueryResult; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.unit.TimeValue; import java.util.function.Function; import java.util.function.Predicate; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; + /** * A cache tier policy which accepts queries whose took time is greater than some threshold. * The threshold should be set to approximately the time it takes to get a result from the cache tier. @@ -30,7 +34,7 @@ public class TookTimePolicy implements Predicate { /** * The minimum took time to allow a query. Set to TimeValue.ZERO to let all data through. */ - private final TimeValue threshold; + private TimeValue threshold; /** * Function which extracts the relevant PolicyValues from a serialized CachedQueryResult @@ -41,13 +45,25 @@ public class TookTimePolicy implements Predicate { * Constructs a took time policy. 
* @param threshold the threshold * @param cachedResultParser the function providing policy values + * @param clusterSettings cluster settings + * @param cacheType cache type */ - public TookTimePolicy(TimeValue threshold, Function cachedResultParser) { + public TookTimePolicy( + TimeValue threshold, + Function cachedResultParser, + ClusterSettings clusterSettings, + CacheType cacheType + ) { if (threshold.compareTo(TimeValue.ZERO) < 0) { throw new IllegalArgumentException("Threshold for TookTimePolicy must be >= 0ms but was " + threshold.getStringRep()); } this.threshold = threshold; this.cachedResultParser = cachedResultParser; + clusterSettings.addSettingsUpdateConsumer(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType), this::setThreshold); + } + + private void setThreshold(TimeValue threshold) { + this.threshold = threshold; } /** diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index 00a8eec93acc9..cab05732ba1c4 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -15,19 +15,21 @@ import org.opensearch.common.cache.LoadAwareCacheLoader; import org.opensearch.common.cache.RemovalListener; import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.RemovalReason; import org.opensearch.common.cache.policy.CachedQueryResult; import org.opensearch.common.cache.store.config.CacheConfig; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ReleasableLock; -import org.opensearch.common.util.iterable.Iterables; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.NoSuchElementException; import java.util.Objects; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -47,6 +49,9 @@ @ExperimentalApi public class TieredSpilloverCache implements ICache { + // Used to avoid caching stale entries in lower tiers. 
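The rationale: a removal driven purely by space pressure leaves the entry still valid, so it may be demoted to the disk tier, whereas a replaced or invalidated entry would be served stale from disk. A minimal sketch of the routing rule the listener below implements (EVICTED, CAPACITY and INVALIDATED appear in this change; any other RemovalReason values are assumed):

static boolean shouldSpillToDisk(RemovalReason reason) {
    // EVICTED/CAPACITY: removed only to free space, the value is still current -> demote to disk.
    // Anything else (e.g. INVALIDATED): the value is no longer valid -> drop it and
    // notify the outer removal listener instead.
    return reason == RemovalReason.EVICTED || reason == RemovalReason.CAPACITY;
}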
+ private static final List SPILLOVER_REMOVAL_REASONS = List.of(RemovalReason.EVICTED, RemovalReason.CAPACITY); + private final ICache diskCache; private final ICache onHeapCache; private final RemovalListener removalListener; @@ -69,8 +74,11 @@ public class TieredSpilloverCache implements ICache { @Override public void onRemoval(RemovalNotification notification) { try (ReleasableLock ignore = writeLock.acquire()) { - if (evaluatePolicies(notification.getValue())) { + if (SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()) + && evaluatePolicies(notification.getValue())) { diskCache.put(notification.getKey(), notification.getValue()); + } else { + removalListener.onRemoval(notification); } } } @@ -81,6 +89,7 @@ public void onRemoval(RemovalNotification notification) { .setWeigher(builder.cacheConfig.getWeigher()) .setMaxSizeInBytes(builder.cacheConfig.getMaxSizeInBytes()) .setExpireAfterAccess(builder.cacheConfig.getExpireAfterAccess()) + .setClusterSettings(builder.cacheConfig.getClusterSettings()) .build(), builder.cacheType, builder.cacheFactories @@ -156,10 +165,11 @@ public void invalidateAll() { * Provides an iteration over both onHeap and disk keys. This is not protected from any mutations to the cache. * @return An iterable over (onHeap + disk) keys */ - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked" }) @Override public Iterable keys() { - return Iterables.concat(onHeapCache.keys(), diskCache.keys()); + Iterable[] iterables = (Iterable[]) new Iterable[] { onHeapCache.keys(), diskCache.keys() }; + return new ConcatenatedIterables(iterables); } @Override @@ -213,6 +223,67 @@ boolean evaluatePolicies(V value) { return true; } + /** + * ConcatenatedIterables which combines cache iterables and supports remove() functionality as well if the underlying + * iterator supports it. + * @param Type of key. + */ + static class ConcatenatedIterables implements Iterable { + + final Iterable[] iterables; + + ConcatenatedIterables(Iterable[] iterables) { + this.iterables = iterables; + } + + @SuppressWarnings({ "unchecked" }) + @Override + public Iterator iterator() { + Iterator[] iterators = (Iterator[]) new Iterator[iterables.length]; + for (int i = 0; i < iterables.length; i++) { + iterators[i] = iterables[i].iterator(); + } + return new ConcatenatedIterator<>(iterators); + } + + static class ConcatenatedIterator implements Iterator { + private final Iterator[] iterators; + private int currentIteratorIndex; + private Iterator currentIterator; + + public ConcatenatedIterator(Iterator[] iterators) { + this.iterators = iterators; + this.currentIteratorIndex = 0; + this.currentIterator = iterators[currentIteratorIndex]; + } + + @Override + public boolean hasNext() { + while (!currentIterator.hasNext()) { + currentIteratorIndex++; + if (currentIteratorIndex == iterators.length) { + return false; + } + currentIterator = iterators[currentIteratorIndex]; + } + return true; + } + + @Override + public T next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + return currentIterator.next(); + } + + @Override + public void remove() { + currentIterator.remove(); + } + } + } + /** * Factory to create TieredSpilloverCache objects.
*/ @@ -253,8 +324,7 @@ public ICache create(CacheConfig config, CacheType cacheType, } ICache.Factory diskCacheFactory = cacheFactories.get(diskCacheStoreName); - TimeValue diskPolicyThreshold = TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD - .getConcreteSettingForNamespace(cacheType.getSettingPrefix()) + TimeValue diskPolicyThreshold = TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType) .get(settings); Function cachedResultParser = Objects.requireNonNull( config.getCachedResultParser(), @@ -266,7 +336,7 @@ public ICache create(CacheConfig config, CacheType cacheType, .setRemovalListener(config.getRemovalListener()) .setCacheConfig(config) .setCacheType(cacheType) - .addPolicy(new TookTimePolicy(diskPolicyThreshold, cachedResultParser)) + .addPolicy(new TookTimePolicy(diskPolicyThreshold, cachedResultParser, config.getClusterSettings(), cacheType)) .build(); } diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java index 0cc8a711faaf5..dfd40199d859e 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java @@ -18,6 +18,8 @@ import java.util.List; import java.util.Map; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; + /** * Plugin for TieredSpilloverCache. */ @@ -51,11 +53,7 @@ public List> getSettings() { settingList.add( TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace(cacheType.getSettingPrefix()) ); - settingList.add( - TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD.getConcreteSettingForNamespace( - cacheType.getSettingPrefix() - ) - ); + settingList.add(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType)); } return settingList; } diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java index 684307960b8a5..b89e8c517a351 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java @@ -8,9 +8,12 @@ package org.opensearch.cache.common.tier; +import org.opensearch.common.cache.CacheType; import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.TimeUnit; import static org.opensearch.common.settings.Setting.Property.NodeScope; @@ -42,17 +45,36 @@ public class TieredSpilloverCacheSettings { /** * Setting defining the minimum took time for a query to be allowed into the disk cache. 
*/ - public static final Setting.AffixSetting TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD = Setting.suffixKeySetting( + private static final Setting.AffixSetting TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD = Setting.suffixKeySetting( TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".disk.store.policies.took_time.threshold", (key) -> Setting.timeSetting( key, new TimeValue(10, TimeUnit.MILLISECONDS), // Default value for this setting TimeValue.ZERO, // Minimum value for this setting - NodeScope + NodeScope, + Setting.Property.Dynamic ) ); - // 10 ms was chosen as a safe value based on proof of concept, where we saw disk latencies in this range. - // Will be tuned further with future benchmarks. + + /** + * Stores took time policy settings for various cache types; these are dynamic, so they can be registered and + * retrieved accordingly. + */ + public static final Map> TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; + + /** + * Fetches concrete took time policy settings. + */ + static { + Map> concreteTookTimePolicySettingMap = new HashMap<>(); + for (CacheType cacheType : CacheType.values()) { + concreteTookTimePolicySettingMap.put( + cacheType, + TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD.getConcreteSettingForNamespace(cacheType.getSettingPrefix()) + ); + } + TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP = concreteTookTimePolicySettingMap; + } /** * Default constructor diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/policy/TookTimePolicyTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/policy/TookTimePolicyTests.java index 237c9c7b79db4..000067280e50d 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/policy/TookTimePolicyTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/policy/TookTimePolicyTests.java @@ -12,20 +12,27 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; import org.opensearch.common.Randomness; +import org.opensearch.common.cache.CacheType; import org.opensearch.common.cache.policy.CachedQueryResult; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.query.QuerySearchResult; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.IOException; +import java.util.HashSet; import java.util.Random; import java.util.function.Function; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; + public class TookTimePolicyTests extends OpenSearchTestCase { private final Function transformationFunction = (data) -> { try { @@ -35,8 +42,17 @@ public class TookTimePolicyTests extends OpenSearchTestCase { } }; + private ClusterSettings clusterSettings; + + @Before + public void setup() { + Settings settings = Settings.EMPTY; + clusterSettings = new ClusterSettings(settings, new HashSet<>()); + clusterSettings.registerSetting(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE)); + } + private TookTimePolicy getTookTimePolicy(TimeValue threshold) { - return new TookTimePolicy<>(threshold, transformationFunction); + return
new TookTimePolicy<>(threshold, transformationFunction, clusterSettings, CacheType.INDICES_REQUEST_CACHE); } public void testTookTimePolicy() throws Exception { diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java index d8a6eb480a5a5..548c5d846dda5 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java @@ -18,7 +18,9 @@ import org.opensearch.common.cache.store.builders.ICacheBuilder; import org.opensearch.common.cache.store.config.CacheConfig; +import java.util.Iterator; import java.util.Map; +import java.util.NoSuchElementException; import java.util.concurrent.ConcurrentHashMap; public class MockDiskCache implements ICache { @@ -79,7 +81,7 @@ public void invalidateAll() { @Override public Iterable keys() { - return this.cache.keySet(); + return () -> new CacheKeyIterator<>(cache, removalListener); } @Override @@ -156,4 +158,48 @@ public Builder setValueSerializer(Serializer valueSerializer) { } } + + /** + * Provides an iterator over keys. + * @param Type of key + * @param Type of value + */ + static class CacheKeyIterator implements Iterator { + private final Iterator> entryIterator; + private final Map cache; + private final RemovalListener removalListener; + private K currentKey; + + public CacheKeyIterator(Map cache, RemovalListener removalListener) { + this.entryIterator = cache.entrySet().iterator(); + this.removalListener = removalListener; + this.cache = cache; + } + + @Override + public boolean hasNext() { + return entryIterator.hasNext(); + } + + @Override + public K next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + Map.Entry entry = entryIterator.next(); + currentKey = entry.getKey(); + return currentKey; + } + + @Override + public void remove() { + if (currentKey == null) { + throw new IllegalStateException("No element to remove"); + } + V value = cache.get(currentKey); + cache.remove(currentKey); + this.removalListener.onRemoval(new RemovalNotification<>(currentKey, value, RemovalReason.INVALIDATED)); + currentKey = null; + } + } } diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index b132952834f06..431aca51099a6 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -19,14 +19,17 @@ import org.opensearch.common.cache.store.config.CacheConfig; import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings; import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.UUID; @@ -38,10 +41,20 @@ import java.util.function.Function; import java.util.function.Predicate; +import static
org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; import static org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES_KEY; public class TieredSpilloverCacheTests extends OpenSearchTestCase { + private ClusterSettings clusterSettings; + + @Before + public void setup() { + Settings settings = Settings.EMPTY; + clusterSettings = new ClusterSettings(settings, new HashSet<>()); + clusterSettings.registerSetting(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE)); + } + public void testComputeIfAbsentWithoutAnyOnHeapCacheEviction() throws Exception { int onHeapCacheSize = randomIntBetween(10, 30); int keyValueSize = 50; @@ -134,6 +147,7 @@ public void testComputeIfAbsentWithFactoryBasedCacheCreation() throws Exception .setSettings(settings) .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken // 20_000_000 ns = 20 ms to compute + .setClusterSettings(clusterSettings) .build(), CacheType.INDICES_REQUEST_CACHE, Map.of( @@ -959,9 +973,7 @@ public void testTookTimePolicyFromFactory() throws Exception { onHeapCacheSize * keyValueSize + "b" ) .put( - TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD.getConcreteSettingForNamespace( - CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() - ).getKey(), + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), new TimeValue(timeValueThresholdNanos / 1_000_000) ) .build(); @@ -979,6 +991,7 @@ public CachedQueryResult.PolicyValues apply(String s) { return new CachedQueryResult.PolicyValues(tookTimeMap.get(s)); } }) + .setClusterSettings(clusterSettings) .build(), CacheType.INDICES_REQUEST_CACHE, Map.of( @@ -1024,8 +1037,9 @@ public CachedQueryResult.PolicyValues apply(String s) { public void testMinimumThresholdSettingValue() throws Exception { // Confirm we can't set TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD to below // TimeValue.ZERO (for example, MINUS_ONE) - Setting concreteSetting = TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD - .getConcreteSettingForNamespace(CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()); + Setting concreteSetting = TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get( + CacheType.INDICES_REQUEST_CACHE + ); TimeValue validDuration = new TimeValue(0, TimeUnit.MILLISECONDS); Settings validSettings = Settings.builder().put(concreteSetting.getKey(), validDuration).build(); diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml index 2dfa17174b139..d7be48a92908c 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml @@ -144,3 +144,26 @@ teardown: - is_false: _source.field1 - match: {_source.field2: value2} + +# related issue: https://github.com/opensearch-project/OpenSearch/issues/12854 +--- +"Test bulk honors pipeline in update action with upsert": + - skip: + version: " - 2.13.99" + reason: "fixed in 2.14.0" + + - do: + bulk: + refresh: true + body: + - '{"update": {"_index": "test_index", "_id": "test_id3", "pipeline": "pipeline1"}}' + - '{"upsert": {"f1": "v2", "f2": 47}, "doc": {"x": 1}}' + + - match: { errors: 
false } + - match: { items.0.update.result: created } + + - do: + get: + index: test_index + id: test_id3 + - match: { _source: {"f1": "v2", "f2": 47, "field1": "value1"}} diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessPlugin.java index aa4081dc70ba6..2217497ebfca0 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessPlugin.java @@ -60,6 +60,7 @@ import org.opensearch.repositories.RepositoriesService; import org.opensearch.rest.RestController; import org.opensearch.rest.RestHandler; +import org.opensearch.script.DerivedFieldScript; import org.opensearch.script.IngestScript; import org.opensearch.script.ScoreScript; import org.opensearch.script.ScriptContext; @@ -108,6 +109,11 @@ public final class PainlessPlugin extends Plugin implements ScriptPlugin, Extens ingest.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.opensearch.ingest.txt")); map.put(IngestScript.CONTEXT, ingest); + // Functions available to derived fields + List derived = new ArrayList<>(Whitelist.BASE_WHITELISTS); + derived.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.opensearch.derived.txt")); + map.put(DerivedFieldScript.CONTEXT, derived); + allowlists = map; } diff --git a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.derived.txt b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.derived.txt new file mode 100644 index 0000000000000..9a3dd4894b286 --- /dev/null +++ b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.derived.txt @@ -0,0 +1,17 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# + +# This file contains an allowlist for functions to be used in derived field context + +class org.opensearch.script.DerivedFieldScript @no_import { +} + +static_import { + void emit(org.opensearch.script.DerivedFieldScript, Object) bound_to org.opensearch.script.ScriptEmitValues$EmitSingle + void emit(org.opensearch.script.DerivedFieldScript, double, double) bound_to org.opensearch.script.ScriptEmitValues$GeoPoint +} diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/DerivedFieldScriptTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/DerivedFieldScriptTests.java new file mode 100644 index 0000000000000..7ccbfaac2eac7 --- /dev/null +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/DerivedFieldScriptTests.java @@ -0,0 +1,227 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.painless; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.memory.MemoryIndex; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.fielddata.IndexGeoPointFieldData; +import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.fielddata.LeafGeoPointFieldData; +import org.opensearch.index.fielddata.LeafNumericFieldData; +import org.opensearch.index.fielddata.MultiGeoPointValues; +import org.opensearch.index.fielddata.SortedNumericDoubleValues; +import org.opensearch.index.fielddata.plain.AbstractLeafGeoPointFieldData; +import org.opensearch.index.fielddata.plain.LeafDoubleFieldData; +import org.opensearch.index.mapper.GeoPointFieldMapper.GeoPointFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType; +import org.opensearch.index.mapper.NumberFieldMapper.NumberType; +import org.opensearch.painless.spi.Whitelist; +import org.opensearch.painless.spi.WhitelistLoader; +import org.opensearch.script.DerivedFieldScript; +import org.opensearch.script.ScriptContext; +import org.opensearch.script.ScriptException; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DerivedFieldScriptTests extends ScriptTestCase { + + private static PainlessScriptEngine SCRIPT_ENGINE; + + @Override + public void setUp() throws Exception { + super.setUp(); + + // Adding derived field script to the contexts for the script engine + Map, List> contexts = newDefaultContexts(); + List allowlists = new ArrayList<>(Whitelist.BASE_WHITELISTS); + allowlists.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.opensearch.derived.txt")); + contexts.put(DerivedFieldScript.CONTEXT, allowlists); + + SCRIPT_ENGINE = new PainlessScriptEngine(Settings.EMPTY, contexts); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + SCRIPT_ENGINE = null; + } + + @Override + protected PainlessScriptEngine getEngine() { + return SCRIPT_ENGINE; + } + + private DerivedFieldScript.LeafFactory compile(String expression, SearchLookup lookup) { + DerivedFieldScript.Factory factory = SCRIPT_ENGINE.compile( + "derived_script_test", + expression, + DerivedFieldScript.CONTEXT, + Collections.emptyMap() + ); + return factory.newFactory(Collections.emptyMap(), lookup); + } + + public void testEmittingDoubleField() throws IOException { + // Mocking field value to be returned + NumberFieldType fieldType = new NumberFieldType("test_double_field", NumberType.DOUBLE); + MapperService mapperService = mock(MapperService.class); + when(mapperService.fieldType("test_double_field")).thenReturn(fieldType); + + SortedNumericDoubleValues doubleValues = mock(SortedNumericDoubleValues.class); + when(doubleValues.docValueCount()).thenReturn(1); + when(doubleValues.advanceExact(anyInt())).thenReturn(true); + when(doubleValues.nextValue()).thenReturn(2.718); + + LeafNumericFieldData atomicFieldData = mock(LeafDoubleFieldData.class); // SortedNumericDoubleFieldData + 
when(atomicFieldData.getDoubleValues()).thenReturn(doubleValues); + + IndexNumericFieldData fieldData = mock(IndexNumericFieldData.class); // SortedNumericIndexFieldData + when(fieldData.getFieldName()).thenReturn("test_double_field"); + when(fieldData.load(any())).thenReturn(atomicFieldData); + + SearchLookup lookup = new SearchLookup(mapperService, (ignored, searchLookup) -> fieldData); + + // We don't need a real index, just need to construct a LeafReaderContext which cannot be mocked + MemoryIndex index = new MemoryIndex(); + LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0); + + // Execute the script + DerivedFieldScript script = compile("emit(doc['test_double_field'].value)", lookup).newInstance(leafReaderContext); + script.setDocument(1); + script.execute(); + + List result = script.getEmittedValues(); + assertEquals(List.of(2.718), result); + } + + public void testEmittingGeoPoint() throws IOException { + // Mocking field value to be returned + GeoPointFieldType fieldType = new GeoPointFieldType("test_geo_field"); + MapperService mapperService = mock(MapperService.class); + when(mapperService.fieldType("test_geo_field")).thenReturn(fieldType); + + MultiGeoPointValues geoPointValues = mock(MultiGeoPointValues.class); + when(geoPointValues.docValueCount()).thenReturn(1); + when(geoPointValues.advanceExact(anyInt())).thenReturn(true); + when(geoPointValues.nextValue()).thenReturn(new GeoPoint(5, 8)); + + LeafGeoPointFieldData atomicFieldData = mock(AbstractLeafGeoPointFieldData.class); // LatLonPointDVLeafFieldData + when(atomicFieldData.getGeoPointValues()).thenReturn(geoPointValues); + + IndexGeoPointFieldData fieldData = mock(IndexGeoPointFieldData.class); + when(fieldData.getFieldName()).thenReturn("test_geo_field"); + when(fieldData.load(any())).thenReturn(atomicFieldData); + + SearchLookup lookup = new SearchLookup(mapperService, (ignored, searchLookup) -> fieldData); + + // We don't need a real index, just need to construct a LeafReaderContext which cannot be mocked + MemoryIndex index = new MemoryIndex(); + LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0); + + // Execute the script + DerivedFieldScript script = compile("emit(doc['test_geo_field'].value.getLat(), doc['test_geo_field'].value.getLon())", lookup) + .newInstance(leafReaderContext); + script.setDocument(1); + script.execute(); + + List result = script.getEmittedValues(); + assertEquals(List.of(new Tuple<>(5.0, 8.0)), result); + } + + public void testEmittingMultipleValues() throws IOException { + SearchLookup lookup = mock(SearchLookup.class); + + // We don't need a real index, just need to construct a LeafReaderContext which cannot be mocked + MemoryIndex index = new MemoryIndex(); + LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0); + + LeafSearchLookup leafSearchLookup = mock(LeafSearchLookup.class); + when(lookup.getLeafSearchLookup(leafReaderContext)).thenReturn(leafSearchLookup); + + // Execute the script + DerivedFieldScript script = compile( + "def l = new ArrayList(); l.add('test'); l.add('multiple'); l.add('values'); for (String x : l) emit(x)", + lookup + ).newInstance(leafReaderContext); + script.setDocument(1); + script.execute(); + + List result = script.getEmittedValues(); + assertEquals(List.of("test", "multiple", "values"), result); + } + + public void testExceedingByteSizeLimit() throws IOException { + SearchLookup lookup = mock(SearchLookup.class); + + // We don't 
need a real index, just need to construct a LeafReaderContext which cannot be mocked + MemoryIndex index = new MemoryIndex(); + LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0); + + LeafSearchLookup leafSearchLookup = mock(LeafSearchLookup.class); + when(lookup.getLeafSearchLookup(leafReaderContext)).thenReturn(leafSearchLookup); + + // Emitting a large string to exceed the byte size limit + DerivedFieldScript stringScript = compile("for (int i = 0; i < 1024 * 1024; i++) emit('a' + i);", lookup).newInstance( + leafReaderContext + ); + expectThrows(ScriptException.class, () -> { + stringScript.setDocument(1); + stringScript.execute(); + }); + + // Emitting an integer to check byte size limit + DerivedFieldScript intScript = compile("for (int i = 0; i < 1024 * 1024; i++) emit(42)", lookup).newInstance(leafReaderContext); + expectThrows(ScriptException.class, "Expected IllegalStateException for exceeding byte size limit", () -> { + intScript.setDocument(1); + intScript.execute(); + }); + + // Emitting a long to check byte size limit + DerivedFieldScript longScript = compile("for (int i = 0; i < 1024 * 1024; i++) emit(1234567890123456789L)", lookup).newInstance( + leafReaderContext + ); + expectThrows(ScriptException.class, "Expected IllegalStateException for exceeding byte size limit", () -> { + longScript.setDocument(1); + longScript.execute(); + }); + + // Emitting a double to check byte size limit + DerivedFieldScript doubleScript = compile("for (int i = 0; i < 1024 * 1024; i++) emit(3.14159)", lookup).newInstance( + leafReaderContext + ); + expectThrows(ScriptException.class, "Expected IllegalStateException for exceeding byte size limit", () -> { + doubleScript.setDocument(1); + doubleScript.execute(); + }); + + // Emitting a GeoPoint to check byte size limit + DerivedFieldScript geoPointScript = compile("for (int i = 0; i < 1024 * 1024; i++) emit(1.23, 4.56);", lookup).newInstance( + leafReaderContext + ); + expectThrows(ScriptException.class, "Expected IllegalStateException for exceeding byte size limit", () -> { + geoPointScript.setDocument(1); + geoPointScript.execute(); + }); + } +} diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml index 478ca9ae8abf4..20e6fd351a4b9 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml @@ -2,7 +2,7 @@ - do: scripts_painless_context: {} - match: { contexts.0: aggregation_selector} - - match: { contexts.23: update} + - match: { contexts.24: update} --- "Action to get all API values for score context": diff --git a/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1 b/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- a/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/analysis-phonetic/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ 
No newline at end of file diff --git a/plugins/crypto-kms/licenses/commons-codec-1.15.jar.sha1 b/plugins/crypto-kms/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- a/plugins/crypto-kms/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/crypto-kms/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/plugins/crypto-kms/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index c3d70e9c64968..7f34cec94499c 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -53,7 +53,7 @@ dependencies { api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-lang:commons-lang:2.6" - api "commons-io:commons-io:2.15.1" + api "commons-io:commons-io:${versions.commonsio}" api 'javax.mail:mail:1.4.7' api 'javax.inject:javax.inject:1' api "com.sun.jersey:jersey-client:${versions.jersey}" diff --git a/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- a/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-io-2.15.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-io-2.15.1.jar.sha1 deleted file mode 100644 index 47c5d13812a36..0000000000000 --- a/plugins/discovery-azure-classic/licenses/commons-io-2.15.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f11560da189ab563a5c8e351941415430e9304ea \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-io-2.16.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-io-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..6a7b638719fa3 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/commons-io-2.16.0.jar.sha1 @@ -0,0 +1 @@ +27875a7935f1ddcc13267eb6fae1f719e0409572 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- a/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/discovery-ec2/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/plugins/discovery-ec2/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ 
+47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- a/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/discovery-gce/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/plugins/discovery-gce/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 6da34c4c9caf2..4749aa911886d 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -38,10 +38,10 @@ opensearchplugin { } versions << [ - 'tika' : '2.6.0', - 'pdfbox': '2.0.27', - 'poi' : '5.2.3', - 'mime4j': '0.8.8' + 'tika' : '2.9.2', + 'pdfbox': '2.0.31', + 'poi' : '5.2.5', + 'mime4j': '0.8.11' ] dependencies { @@ -50,6 +50,16 @@ dependencies { api "org.apache.tika:tika-parsers:${versions.tika}" // Required for the various document parsers api "org.apache.tika:tika-parsers-standard-package:${versions.tika}" + api "org.apache.tika:tika-parser-apple-module:${versions.tika}" + api "org.apache.tika:tika-parser-html-module:${versions.tika}" + api "org.apache.tika:tika-parser-microsoft-module:${versions.tika}" + api "org.apache.tika:tika-parser-miscoffice-module:${versions.tika}" + api "org.apache.tika:tika-parser-pdf-module:${versions.tika}" + api "org.apache.tika:tika-parser-text-module:${versions.tika}" + api "org.apache.tika:tika-parser-xml-module:${versions.tika}" + // Utilities consumed by document parsers + api "org.apache.tika:tika-parser-xmp-commons:${versions.tika}" + api "org.apache.tika:tika-parser-zip-commons:${versions.tika}" // Required for language detection api "org.apache.tika:tika-langdetect-optimaize:${versions.tika}" // Optimaize libraries/dependencies @@ -57,7 +67,7 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" // Other dependencies api 'org.tukaani:xz:1.9' - api 'commons-io:commons-io:2.15.1' + api "commons-io:commons-io:${versions.commonsio}" api "org.slf4j:slf4j-api:${versions.slf4j}" // character set detection diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.11.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.11.jar.sha1 new file mode 100644 index 0000000000000..82d9bf2617ce6 --- /dev/null +++ b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.11.jar.sha1 @@ -0,0 +1 @@ +6d1eb5f7b84eaa9d38fca13b761f01c693aef3da \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 deleted file mode 100644 index 77c36691d36b5..0000000000000 --- a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7330de23c52f71617cbec7f1d2760dae32e687cd \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.11.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.11.jar.sha1 new file mode 100644 index 0000000000000..7a494aba6a231 --- /dev/null +++ 
diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.11.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.11.jar.sha1
new file mode 100644
index 0000000000000..7a494aba6a231
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.11.jar.sha1
@@ -0,0 +1 @@
+f0d42ab9a5832b5f5d05afc004b31245b838e0fc
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1
deleted file mode 100644
index fb9c5fed27162..0000000000000
--- a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e76715563a6bd150f84ccb0adb920aec8faf4779
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1 b/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1
deleted file mode 100644
index 62d99837b87e1..0000000000000
--- a/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/ingest-attachment/licenses/commons-codec-1.16.1.jar.sha1
new file mode 100644
index 0000000000000..6b8803089c6d7
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/commons-codec-1.16.1.jar.sha1
@@ -0,0 +1 @@
+47bd4d333fba53406f6c6c51884ddbca435c8862
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.24.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-compress-1.24.0.jar.sha1
deleted file mode 100644
index 23999d1bfbde4..0000000000000
--- a/plugins/ingest-attachment/licenses/commons-compress-1.24.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b4b1b5a3d9573b2970fddab236102c0a4d27d35e
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.26.1.jar.sha1 b/plugins/ingest-attachment/licenses/commons-compress-1.26.1.jar.sha1
new file mode 100644
index 0000000000000..912bda85de18a
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/commons-compress-1.26.1.jar.sha1
@@ -0,0 +1 @@
+44331c1130c370e726a2e1a3e6fba6d2558ef04a
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/commons-io-2.15.1.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.15.1.jar.sha1
deleted file mode 100644
index 47c5d13812a36..0000000000000
--- a/plugins/ingest-attachment/licenses/commons-io-2.15.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f11560da189ab563a5c8e351941415430e9304ea
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/commons-io-2.16.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.16.0.jar.sha1
new file mode 100644
index 0000000000000..6a7b638719fa3
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/commons-io-2.16.0.jar.sha1
@@ -0,0 +1 @@
+27875a7935f1ddcc13267eb6fae1f719e0409572
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/commons-lang3-3.13.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-lang3-3.13.0.jar.sha1
deleted file mode 100644
index d0c2f2486ee1f..0000000000000
--- a/plugins/ingest-attachment/licenses/commons-lang3-3.13.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b7263237aa89c1f99b327197c41d0669707a462e
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/commons-lang3-3.14.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-lang3-3.14.0.jar.sha1
new file mode 100644
index 0000000000000..d783e07e40902
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/commons-lang3-3.14.0.jar.sha1
@@ -0,0 +1 @@
+1ed471194b02f2c6cb734a0cd6f6f107c673afae
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1
deleted file mode 100644
index d578dffbfa3f6..0000000000000
--- a/plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d08c064d18b2b149da937d15c0d1708cba03f29d
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.31.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.31.jar.sha1
new file mode 100644
index 0000000000000..d45d45a66e072
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/fontbox-2.0.31.jar.sha1
@@ -0,0 +1 @@
+96999ecdb7324bf718b88724818fa62f81286c36
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1
deleted file mode 100644
index 4f670b7f95e8c..0000000000000
--- a/plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-416a9dfce3714116bfdf793b15368df04266845f
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.31.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.31.jar.sha1
new file mode 100644
index 0000000000000..fa256ed9a65d2
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/pdfbox-2.0.31.jar.sha1
@@ -0,0 +1 @@
+29b25053099bc30784a766ccb821417e06f4b8a1
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1
deleted file mode 100644
index 3d8b3daf606ad..0000000000000
--- a/plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2fb22ae74ad5aea6af1a9c64b9542f2ccf348604
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/poi-5.2.5.jar.sha1 b/plugins/ingest-attachment/licenses/poi-5.2.5.jar.sha1
new file mode 100644
index 0000000000000..0eca17726eb0b
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/poi-5.2.5.jar.sha1
@@ -0,0 +1 @@
+7e00f6b2f76375fe89022d5a7db8acb71cbd55f5
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1
deleted file mode 100644
index 8371593cf0841..0000000000000
--- a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-02efd11c940adb18c03eb9ce7ad88fc40ee6a196
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.5.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.5.jar.sha1
new file mode 100644
index 0000000000000..6b14be4461425
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.5.jar.sha1
@@ -0,0 +1 @@
+df9f2c52371eeba24db8ea8cafa77285c3cc0742
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1
deleted file mode 100644
index 5c6365876b7be..0000000000000
--- a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-db113c8e9051b0ff967f4911fa20336c8325a7c5
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.5.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.5.jar.sha1
new file mode 100644
index 0000000000000..f9a473173a297
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.5.jar.sha1
@@ -0,0 +1 @@
+eaa61452d8f0d13080fbb4757a392f09f90e4c49
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1
deleted file mode 100644
index 3c8f92498f1a4..0000000000000
--- a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2a7fce47e22b7fedb1b277347ff4fe36d6eda50d
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.5.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.5.jar.sha1
new file mode 100644
index 0000000000000..68665ddafd7d8
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.5.jar.sha1
@@ -0,0 +1 @@
+fc600cf765a49d73935a6e48a5b84f4abcdd0518
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1
deleted file mode 100644
index c66c2f3f39401..0000000000000
--- a/plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f6ed6356dd4a9bd269d873f65494376685e6192e
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-core-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.9.2.jar.sha1
new file mode 100644
index 0000000000000..80635a63d29fe
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/tika-core-2.9.2.jar.sha1
@@ -0,0 +1 @@
+796a21391780339e3d4862626339b49df170024e
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1
deleted file mode 100644
index e7bc59bb5ae49..0000000000000
--- a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-72b784a7bdab0ffde005fa64d15e3f077331d6fc
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.9.2.jar.sha1
new file mode 100644
index 0000000000000..a4bb6d48c6a08
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.9.2.jar.sha1
@@ -0,0 +1 @@
+7a48a287e464b456a85c79f318d7bad7db201518
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-parser-apple-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-apple-module-2.9.2.jar.sha1
new file mode 100644
index 0000000000000..dbaee880d1251
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/tika-parser-apple-module-2.9.2.jar.sha1
@@ -0,0 +1 @@
+758dac27c246c51b019562bab7e266d2da6a6e01
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-parser-html-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-html-module-2.9.2.jar.sha1
new file mode 100644
index 0000000000000..b4806746301ef
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/tika-parser-html-module-2.9.2.jar.sha1
@@ -0,0 +1 @@
+47f6a4c46b92616d14e82cd7ad4d05cb43077b83
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-parser-microsoft-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-microsoft-module-2.9.2.jar.sha1
new file mode 100644
index 0000000000000..da1ae42bac652
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/tika-parser-microsoft-module-2.9.2.jar.sha1
@@ -0,0 +1 @@
+235a20823c02c699ce3d57f3d6b9550db05d91a9
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-parser-miscoffice-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-miscoffice-module-2.9.2.jar.sha1
new file mode 100644
index 0000000000000..7ceed9e1643b8
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/tika-parser-miscoffice-module-2.9.2.jar.sha1
@@ -0,0 +1 @@
+7688a4220d07c32b505230479f957cd495c0bef2
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-parser-pdf-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-pdf-module-2.9.2.jar.sha1
new file mode 100644
index 0000000000000..e780c1b92d525
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/tika-parser-pdf-module-2.9.2.jar.sha1
@@ -0,0 +1 @@
+4d0f0e3f6eff184040402094f4fabbb3c5c7d09f
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-parser-text-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-text-module-2.9.2.jar.sha1
new file mode 100644
index 0000000000000..6e56fcffc5f88
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/tika-parser-text-module-2.9.2.jar.sha1
@@ -0,0 +1 @@
+b3a93e538ba6cb4066aba96d629febf181ec9f92
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-parser-xml-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-xml-module-2.9.2.jar.sha1
new file mode 100644
index 0000000000000..27062077b92bf
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/tika-parser-xml-module-2.9.2.jar.sha1
@@ -0,0 +1 @@
+ff707716c0c4748ffeb21996aefa8d269b3eab5b
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-parser-xmp-commons-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-xmp-commons-2.9.2.jar.sha1
new file mode 100644
index 0000000000000..396e2655b14db
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/tika-parser-xmp-commons-2.9.2.jar.sha1
@@ -0,0 +1 @@
+69104107ff85194df5acf682178128771863e442
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-parser-zip-commons-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-zip-commons-2.9.2.jar.sha1
new file mode 100644
index 0000000000000..bda62033e4e8c
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/tika-parser-zip-commons-2.9.2.jar.sha1
@@ -0,0 +1 @@
+2fcea85a56f93a5c0cb81f3d6dd8673f3d81c598
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1
deleted file mode 100644
index 83c0777fcbe8a..0000000000000
--- a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-00980e70b1df13c1236b750f0ca1462edd5d7417
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.9.2.jar.sha1
new file mode 100644
index 0000000000000..bb76974b6344e
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.9.2.jar.sha1
@@ -0,0 +1 @@
+c8408deb51fa617ef4e912b4d161712e695d3a29
\ No newline at end of file
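The .sha1 churn above and below is the bookkeeping half of those version bumps: the dependency-licenses check tracks one checksum file per jar, so every upgrade deletes the old file and adds a new one. Assuming this build keeps the updateShas task inherited from the upstream build tooling (an assumption, not something this diff shows), the files are regenerated per plugin with `./gradlew :plugins:ingest-attachment:updateShas` rather than edited by hand.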
diff --git a/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 b/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1
deleted file mode 100644
index d0c2f2486ee1f..0000000000000
--- a/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b7263237aa89c1f99b327197c41d0669707a462e
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/commons-lang3-3.14.0.jar.sha1 b/plugins/repository-azure/licenses/commons-lang3-3.14.0.jar.sha1
new file mode 100644
index 0000000000000..d783e07e40902
--- /dev/null
+++ b/plugins/repository-azure/licenses/commons-lang3-3.14.0.jar.sha1
@@ -0,0 +1 @@
+1ed471194b02f2c6cb734a0cd6f6f107c673afae
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1
deleted file mode 100644
index 62d99837b87e1..0000000000000
--- a/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.16.1.jar.sha1
new file mode 100644
index 0000000000000..6b8803089c6d7
--- /dev/null
+++ b/plugins/repository-gcs/licenses/commons-codec-1.16.1.jar.sha1
@@ -0,0 +1 @@
+47bd4d333fba53406f6c6c51884ddbca435c8862
\ No newline at end of file
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index 355d63e2fbe7d..92a9b899a960b 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -75,13 +75,13 @@ dependencies {
   api 'commons-collections:commons-collections:3.2.2'
   api "org.apache.commons:commons-compress:${versions.commonscompress}"
   api 'org.apache.commons:commons-configuration2:2.10.1'
-  api 'commons-io:commons-io:2.16.0'
+  api "commons-io:commons-io:${versions.commonsio}"
   api 'org.apache.commons:commons-lang3:3.14.0'
   implementation 'com.google.re2j:re2j:1.6'
   api 'javax.servlet:servlet-api:2.5'
   api "org.slf4j:slf4j-api:${versions.slf4j}"
   api "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}"
-  api 'net.minidev:json-smart:2.4.11'
+  api 'net.minidev:json-smart:2.5.1'
   api "io.netty:netty-all:${versions.netty}"
   implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}"
   implementation 'org.codehaus.woodstox:stax2-api:4.2.2'
diff --git a/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1
deleted file mode 100644
index 62d99837b87e1..0000000000000
--- a/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/repository-hdfs/licenses/commons-codec-1.16.1.jar.sha1
new file mode 100644
index 0000000000000..6b8803089c6d7
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/commons-codec-1.16.1.jar.sha1
@@ -0,0 +1 @@
+47bd4d333fba53406f6c6c51884ddbca435c8862
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/commons-compress-1.24.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-compress-1.24.0.jar.sha1
deleted file mode 100644
index 23999d1bfbde4..0000000000000
--- a/plugins/repository-hdfs/licenses/commons-compress-1.24.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b4b1b5a3d9573b2970fddab236102c0a4d27d35e
\ No newline at end of file
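The repository-hdfs hunk above pins json-smart at 2.5.1 directly, but the same artifact can also arrive transitively through the Hadoop client stack. Where a transitive edge still resolves to 2.4.x, a resolution force keeps the runtime classpath on the pinned version; a minimal sketch, assuming standard Gradle resolution and not part of this change:

    configurations.all {
        resolutionStrategy {
            // Pin every resolution of json-smart, including transitive ones.
            force 'net.minidev:json-smart:2.5.1'
        }
    }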
diff --git a/plugins/repository-hdfs/licenses/commons-compress-1.26.1.jar.sha1 b/plugins/repository-hdfs/licenses/commons-compress-1.26.1.jar.sha1
new file mode 100644
index 0000000000000..912bda85de18a
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/commons-compress-1.26.1.jar.sha1
@@ -0,0 +1 @@
+44331c1130c370e726a2e1a3e6fba6d2558ef04a
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/json-smart-2.4.11.jar.sha1 b/plugins/repository-hdfs/licenses/json-smart-2.4.11.jar.sha1
deleted file mode 100644
index 04627ab2baace..0000000000000
--- a/plugins/repository-hdfs/licenses/json-smart-2.4.11.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cc5888f14a5768f254b97bafe8b9fd29b31e872e
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1 b/plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1
new file mode 100644
index 0000000000000..fe23968afce1e
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/json-smart-2.5.1.jar.sha1
@@ -0,0 +1 @@
+4c11d2808d009132dfbbf947ebf37de6bf266c8e
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1
deleted file mode 100644
index 62d99837b87e1..0000000000000
--- a/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/repository-s3/licenses/commons-codec-1.16.1.jar.sha1
new file mode 100644
index 0000000000000..6b8803089c6d7
--- /dev/null
+++ b/plugins/repository-s3/licenses/commons-codec-1.16.1.jar.sha1
@@ -0,0 +1 @@
+47bd4d333fba53406f6c6c51884ddbca435c8862
\ No newline at end of file
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json
index 1ba12b00c8178..b280452e313da 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json
@@ -1,7 +1,7 @@
 {
   "cat.aliases":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-aliases/",
       "description":"Shows information about currently configured aliases to indices including filter and routing infos."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json
index 717c1c49808f6..a56d3e9e8a95e 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json
@@ -1,7 +1,7 @@
 {
   "cat.allocation":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-allocation.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-allocation/",
       "description":"Provides a snapshot of how many shards are allocated to each data node and how much disk space they are using."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.cluster_manager.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.cluster_manager.json
index cd96038ad0693..3b999d98eb60d 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.cluster_manager.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.cluster_manager.json
@@ -1,7 +1,7 @@
 {
   "cat.cluster_manager":{
     "documentation":{
-      "url":"https://opensearch.org/docs/latest/opensearch/rest-api/cat/cat-master/",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-cluster_manager/",
       "description":"Returns information about the cluster-manager node."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json
index 8cfaddf8db83b..e85f399c29fd3 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json
@@ -1,7 +1,7 @@
 {
   "cat.count":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-count/",
       "description":"Provides quick access to the document count of the entire cluster, or individual indices."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json
index 9fbde4736b5ef..f2a3cfa8250cb 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json
@@ -1,7 +1,7 @@
 {
   "cat.fielddata":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-fielddata.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-field-data/",
       "description":"Shows how much heap memory is currently being used by fielddata on every data node in the cluster."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json
index 54ab6d6e5168c..c2f1771977cd5 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json
@@ -1,7 +1,7 @@
 {
   "cat.help":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/index/",
       "description":"Returns help for the Cat APIs."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json
index 2491ab309531d..aa36f8fd809aa 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json
@@ -1,7 +1,7 @@
 {
   "cat.indices":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-indices/",
       "description":"Returns information about indices: number of primaries and replicas, document counts, disk size, ..."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json
index c8afa4cb17039..92a7e14b234ce 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json
@@ -1,7 +1,7 @@
 {
   "cat.nodeattrs":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodeattrs.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-nodeattrs/",
       "description":"Returns information about custom node attributes."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json
index abb13d015b4d5..63c70afcb9465 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json
@@ -1,7 +1,7 @@
 {
   "cat.nodes":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodes.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-nodes/",
       "description":"Returns basic statistics about performance of cluster nodes."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json
index 9c0edf8c53d90..d89ce8af72270 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json
@@ -1,7 +1,7 @@
 {
   "cat.pending_tasks":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-pending-tasks.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-pending-tasks/",
       "description":"Returns a concise representation of the cluster pending tasks."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json
index 0b5b39b01ee58..5dd76c0dec4e4 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json
@@ -1,7 +1,7 @@
 {
   "cat.plugins":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-plugins.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-plugins/",
       "description":"Returns information about installed plugins across nodes node."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json
index 7baf0b8ded609..a3dee81e73abc 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json
@@ -1,7 +1,7 @@
 {
   "cat.recovery":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-recovery.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-recovery/",
       "description":"Returns information about index shard recoveries, both on-going completed."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json
index 58960709a99bb..a8174bbd48e07 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json
@@ -1,7 +1,7 @@
 {
   "cat.repositories":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-repositories.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-repositories/",
       "description":"Returns information about snapshot repositories registered in the cluster."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segment_replication.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segment_replication.json
index a815cd5b1101b..91d913d7c9340 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segment_replication.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segment_replication.json
@@ -1,7 +1,7 @@
 {
   "cat.segment_replication":{
     "documentation":{
-      "url":"https://github.com/opensearch-project/documentation-website/issues/2627",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-segment-replication/",
       "description":"Returns information about both on-going and latest completed Segment Replication events"
     },
     "stability":"experimental",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json
index 5107353c7b14f..f9b0de3e2eb50 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json
@@ -1,7 +1,7 @@
 {
   "cat.segments":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-segments.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-segments/",
       "description":"Provides low-level information about the segments in the shards of an index."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json
index fab381a098e3f..05a04c4b580ec 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json
@@ -1,7 +1,7 @@
 {
   "cat.shards":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-shards.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-shards/",
       "description":"Provides a detailed view of shard allocation on nodes."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json
index 1320207abfe75..c6eb24a50e435 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json
@@ -1,7 +1,7 @@
 {
   "cat.snapshots":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-snapshots.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-snapshots/",
       "description":"Returns all snapshots in a specific repository."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json
index 384668f839642..806b993849940 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json
@@ -1,7 +1,7 @@
 {
   "cat.tasks":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-tasks/",
       "description":"Returns information about the tasks currently executing on one or more nodes in the cluster."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json
index d45593b7bb2c8..2f4a5f6e40186 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json
@@ -1,7 +1,7 @@
 {
   "cat.templates":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-templates.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-templates/",
       "description":"Returns information about existing templates."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json
index 1165703490d1a..e5bffbb43de5b 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json
@@ -1,7 +1,7 @@
 {
   "cat.thread_pool":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-thread-pool.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-thread-pool/",
       "description":"Returns cluster-wide thread pool statistics per node.\nBy default the active, queue and rejected statistics are returned for all thread pools."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json b/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json
index b0e50045cd7cd..ed158978e44a4 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json
@@ -1,7 +1,7 @@
 {
   "clear_scroll":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-scroll-api.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/scroll/",
       "description":"Explicitly clears the search context for a scroll."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json
index e46218a781e1b..2f5985faa5f63 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json
@@ -1,7 +1,7 @@
 {
   "cluster.allocation_explain":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocation-explain.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-allocation/",
       "description":"Provides explanations for shard allocations in the cluster."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_component_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_component_template.json
index 43e14ad0e2dd8..737716cec2527 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_component_template.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_component_template.json
@@ -1,7 +1,7 @@
 {
   "cluster.delete_component_template":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html",
+      "url":"https://opensearch.org/docs/latest/im-plugin/index-templates/#create-a-component-template",
       "description":"Deletes a component template"
     },
     "stability":"experimental",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json
index 13ea101169e60..24e9a59baa742 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json
@@ -1,7 +1,7 @@
 {
   "cluster.delete_decommission_awareness": {
     "documentation": {
-      "url": "https://opensearch.org/docs/latest/opensearch/rest-api/decommission/",
+      "url": "https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-decommission/",
       "description": "Delete any existing decommission."
     },
     "stability": "experimental",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json
index 274c32f2a91be..9583ae883da1c 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json
@@ -1,7 +1,7 @@
 {
   "cluster.delete_weighted_routing": {
     "documentation": {
-      "url": "https://opensearch.org/docs/latest/opensearch/rest-api/weighted-routing/delete",
+      "url": "https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-awareness",
       "description": "Delete weighted shard routing weights"
     },
     "stability": "experimental",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json
index 302dea4ec31a7..feb760c2d7783 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json
@@ -1,7 +1,7 @@
 {
   "cluster.get_decommission_awareness": {
     "documentation": {
-      "url": "https://opensearch.org/docs/latest/opensearch/rest-api/decommission/",
+      "url": "https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-decommission/",
       "description": "Get details and status of decommissioned attribute"
     },
     "stability": "experimental",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json
index c60230dbc43b3..df3e6adb6596d 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json
@@ -1,7 +1,7 @@
 {
   "cluster.get_settings":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-settings/",
       "description":"Returns cluster settings."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_weighted_routing.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_weighted_routing.json
index 95e776d2ffb8b..7dfb15696edfb 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_weighted_routing.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_weighted_routing.json
@@ -1,7 +1,7 @@
 {
   "cluster.get_weighted_routing": {
     "documentation": {
-      "url": "https://opensearch.org/docs/latest/opensearch/rest-api/weighted-routing/get",
+      "url": "https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-awareness/",
       "description": "Fetches weighted shard routing weights"
     },
     "stability": "experimental",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json
index b96340d682546..db820bc4b2c85 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json
@@ -1,7 +1,7 @@
 {
   "cluster.health":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-health.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-health/",
       "description":"Returns basic information about the health of the cluster."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_component_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_component_template.json
index 05558bc7bfc50..5ac77f0f5eeb1 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_component_template.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_component_template.json
@@ -1,7 +1,7 @@
 {
   "cluster.put_component_template":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html",
+      "url":"https://opensearch.org/docs/latest/im-plugin/index-templates/#create-a-component-template",
       "description":"Creates or updates a component template"
     },
     "stability":"experimental",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_decommission_awareness.json
index bf4ffd454d9df..9bb0b4e96ff2b 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_decommission_awareness.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_decommission_awareness.json
@@ -1,7 +1,7 @@
 {
   "cluster.put_decommission_awareness": {
     "documentation": {
-      "url": "https://opensearch.org/docs/latest/opensearch/rest-api/decommission/",
+      "url": "https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-decommission/",
       "description": "Decommissions an awareness attribute"
     },
     "stability": "experimental",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json
index 1e36acc51544d..eec2a24057095 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json
@@ -1,7 +1,7 @@
 {
   "cluster.put_settings":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-settings/",
       "description":"Updates the cluster settings."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_weighted_routing.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_weighted_routing.json
index b8ce13ab33621..97d01659efd3b 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_weighted_routing.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_weighted_routing.json
@@ -1,7 +1,7 @@
 {
   "cluster.put_weighted_routing": {
     "documentation": {
-      "url": "https://opensearch.org/docs/latest/opensearch/rest-api/weighted-routing/put",
+      "url": "https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-awareness",
       "description": "Updates weighted shard routing weights"
     },
     "stability": "experimental",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.remote_info.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.remote_info.json
index 4eac0b55ce6f1..32315ec39f1e4 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.remote_info.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.remote_info.json
@@ -1,7 +1,7 @@
 {
   "cluster.remote_info":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-remote-info.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/remote-info/",
       "description":"Returns the information about configured remote clusters."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json
index b43ab901785bd..b4c7240548a79 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json
@@ -1,7 +1,7 @@
 {
   "cluster.state":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-state.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/count/",
       "description":"Returns a comprehensive information about the state of the cluster."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json
index f36db0979f4f7..aaaab17fb891a 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json
@@ -1,7 +1,7 @@
 {
   "cluster.stats":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-stats.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-stats/",
       "description":"Returns high-level overview of cluster statistics."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json
index 8cdb3db7c12cd..38eb435e29a9e 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json
@@ -1,7 +1,7 @@
 {
   "count":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/search-count.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/count/",
       "description":"Returns number of documents matching a query."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json
index 767af84b82258..b3e678ace4751 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json
@@ -1,7 +1,7 @@
 {
   "create":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/document-apis/index-document/",
       "description":"Creates a new document in the index.\n\nReturns a 409 response when a document with a same ID already exists in the index."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json
index d3a2104c01bc0..81fb3f7c3b973 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json
@@ -2,7 +2,7 @@
 {
   "create_pit":{
     "documentation":{
-      "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/",
+      "url":"https://opensearch.org/docs/latest/search-plugins/searching-data/point-in-time-api",
       "description":"Creates point in time context."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.delete_dangling_index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.delete_dangling_index.json
index 5d832fc794f4f..d0c4aaefbcadf 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.delete_dangling_index.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.delete_dangling_index.json
@@ -1,7 +1,7 @@
 {
   "dangling_indices.delete_dangling_index": {
     "documentation": {
-      "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html",
+      "url": "https://opensearch.org/docs/latest/api-reference/index-apis/dangling-index/",
       "description": "Deletes the specified dangling index"
     },
     "stability": "stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.import_dangling_index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.import_dangling_index.json
index 5b056e1fa145f..f6cedbe41ef53 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.import_dangling_index.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.import_dangling_index.json
@@ -1,7 +1,7 @@
 {
   "dangling_indices.import_dangling_index": {
     "documentation": {
-      "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html",
+      "url": "https://opensearch.org/docs/latest/api-reference/index-apis/dangling-index/",
       "description": "Imports the specified dangling index"
     },
     "stability": "stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.list_dangling_indices.json b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.list_dangling_indices.json
index dfc21f56ddfac..0d14c8380c7fe 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.list_dangling_indices.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.list_dangling_indices.json
@@ -1,7 +1,7 @@
 {
   "dangling_indices.list_dangling_indices": {
     "documentation": {
-      "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html",
+      "url": "https://opensearch.org/docs/latest/api-reference/index-apis/dangling-index/",
       "description": "Returns all dangling indices."
     },
     "stability": "stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json
index 76dceb455627f..668976d360999 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json
@@ -1,7 +1,7 @@
 {
   "delete":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/document-apis/delete-document/",
       "description":"Removes a document from the index."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json
index 5ff01aa746df9..1e329e3c27b5b 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json
@@ -1,7 +1,7 @@
 {
   "delete_all_pits":{
     "documentation":{
-      "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/",
+      "url":"https://opensearch.org/docs/latest/search-plugins/searching-data/point-in-time-api",
       "description":"Deletes all active point in time searches."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json
index 4c32974583aac..0c493a092e244 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json
@@ -1,7 +1,7 @@
 {
   "delete_by_query":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/document-apis/delete-by-query/",
       "description":"Deletes documents matching the provided query."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query_rethrottle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query_rethrottle.json
index 112bfc8a7d2e0..c9660fe2e4427 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query_rethrottle.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query_rethrottle.json
@@ -1,7 +1,7 @@
 {
   "delete_by_query_rethrottle":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/document-apis/delete-by-query/",
       "description":"Changes the number of requests per second for a particular Delete By Query operation."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json
index b54d9f76204f4..234001349d889 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json
@@ -1,7 +1,7 @@
 {
   "delete_pit":{
     "documentation":{
-      "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/",
+      "url":"https://opensearch.org/docs/latest/search-plugins/searching-data/point-in-time-api",
       "description":"Deletes one or more point in time searches based on the IDs passed."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json
index acaa389738606..b858f71eb2812 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json
@@ -1,7 +1,7 @@
 {
   "delete_script":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/script-apis/delete-script/",
       "description":"Deletes a script."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json
index fd221b474a070..0b4f588a673ac 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json
@@ -1,7 +1,7 @@
 {
   "exists":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/document-apis/get-documents/",
       "description":"Returns information about whether a document exists in an index."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json
index bdbf818fb5d81..5c44f57c289b5 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json
@@ -1,7 +1,7 @@
 {
   "exists_source":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/document-apis/get-documents/",
       "description":"Returns information about whether a document source exists in an index."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json
index 7f630f7666f30..4ad263262fe14 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json
@@ -1,7 +1,7 @@
 {
   "explain":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/search-explain.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/explain/",
       "description":"Returns information about why a specific matches (or doesn't match) a query."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json
index 2ce77f17aff10..990ec4685fa54 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json
@@ -1,7 +1,7 @@
 {
   "get":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/document-apis/get-documents/",
       "description":"Returns a document."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_all_pits.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_all_pits.json
index 544a8cb11b002..9176d499e754b 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_all_pits.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_all_pits.json
@@ -1,7 +1,7 @@
 {
   "get_all_pits":{
     "documentation":{
-      "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/",
+      "url":"https://opensearch.org/docs/latest/search-plugins/searching-data/point-in-time-api",
       "description":"Lists all active point in time searches."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_context.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_context.json
index aa770ee9d9f2e..c5034e48e6e65 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_context.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_context.json
@@ -1,7 +1,7 @@
 {
   "get_script_context":{
     "documentation":{
-      "url": "https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts.html",
+      "url": "https://opensearch.org/docs/latest/api-reference/script-apis/get-script-language/",
       "description":"Returns all script contexts."
     },
     "stability":"experimental",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_languages.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_languages.json
index a5e06cb88901b..006f34a9762b0 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_languages.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_languages.json
@@ -1,7 +1,7 @@
 {
   "get_script_languages":{
     "documentation":{
-      "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html",
+      "url": "https://opensearch.org/docs/latest/api-reference/script-apis/get-script-language/",
       "description":"Returns available script types, languages and contexts"
     },
     "stability":"experimental",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json
index ad79678388590..ab269fcc0980f 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json
@@ -1,7 +1,7 @@
 {
   "get_source":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/document-apis/get-documents/",
       "description":"Returns the source of a document."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json
index b4865403331b0..a02e5bbd2c6e8 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json
@@ -1,7 +1,7 @@
 {
   "index":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/document-apis/index-document/",
       "description":"Creates or updates a document in an index."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json
index aa8e84c1985d6..ddf6b93bba0c5 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json
@@ -1,7 +1,7 @@
 {
   "indices.analyze":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/analyze-apis/",
       "description":"Performs the analysis process on a text and return the tokens breakdown of the text."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json
index 0c7eca8c8e6f5..09ff5af473335 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json
@@ -1,7 +1,7 @@
 {
   "indices.clear_cache":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/index-apis/clear-index-cache/",
       "description":"Clears all or specific caches for one or more indices."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json
index 2d874f4933768..0872e1b0d6266 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json
@@ -1,7 +1,7 @@
 {
   "indices.clone": {
     "documentation": {
-      "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html",
+      "url": "https://opensearch.org/docs/latest/api-reference/index-apis/clone/",
       "description": "Clones an index"
     },
     "stability": "stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json
index 1182b73541f93..863080befd3d6 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json
@@ -1,7 +1,7 @@
 {
   "indices.close":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/index-apis/close-index/",
       "description":"Closes an index."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json
index 53ea4cbd80803..c35876c9e6f15 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json
@@ -1,7 +1,7 @@
 {
   "indices.create":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/index-apis/create-index/",
       "description":"Creates an index with optional settings and mappings."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.data_streams_stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.data_streams_stats.json
index 67f90c48eb79f..4dea89d4189b1 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.data_streams_stats.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.data_streams_stats.json
@@ -1,7 +1,7 @@
 {
   "indices.data_streams_stats":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html",
+      "url":"https://opensearch.org/docs/latest/im-plugin/data-streams/",
       "description":"Provides statistics on operations happening in a data stream."
     },
     "stability":"experimental",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json
index 53fdf44bb36a1..ea8a3f6b769e9 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json
@@ -1,7 +1,7 @@
 {
   "indices.delete":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/index-apis/delete-index/",
       "description":"Deletes an index."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_stream.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_stream.json
index f824fb5207d46..e695fb4cfcf60 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_stream.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_stream.json
@@ -1,7 +1,7 @@
 {
   "indices.delete_data_stream":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html",
+      "url":"https://opensearch.org/docs/latest/im-plugin/data-streams/",
       "description":"Deletes a data stream."
     },
     "stability":"experimental",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json
index 74dbb1822b64a..252ab47a8df6f 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json
@@ -1,7 +1,7 @@
 {
   "indices.delete_template":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html",
+      "url":"https://opensearch.org/docs/latest/im-plugin/index-templates",
       "description":"Deletes an index template."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json
index 7539f44a81eed..7be004cd9c4ea 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json
@@ -1,7 +1,7 @@
 {
   "indices.exists":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/index-apis/exists/",
       "description":"Returns information about whether a particular index exists."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json
index 66e5ce92cbbe5..88d1c00076757 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json
@@ -1,7 +1,7 @@
 {
   "indices.exists_alias":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/index-apis/alias/",
       "description":"Returns information about whether a particular alias exists."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_index_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_index_template.json
index c5312680fa880..8b050881240da 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_index_template.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_index_template.json
@@ -1,7 +1,7 @@
 {
   "indices.exists_index_template":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html",
+      "url":"https://opensearch.org/docs/latest/im-plugin/index-templates",
       "description":"Returns information about whether a particular index template exists."
     },
     "stability":"experimental",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json
index 9796bdd9d21ff..08ab5cf81e57d 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json
@@ -1,7 +1,7 @@
 {
   "indices.exists_template":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html",
+      "url":"https://opensearch.org/docs/latest/im-plugin/index-templates",
       "description":"Returns information about whether a particular index template exists."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json
index 986bce55f41e5..a1e066c558cb2 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json
@@ -1,7 +1,7 @@
 {
   "indices.forcemerge":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/index-apis/force-merge/",
       "description":"Performs the force merge operation on one or more indices."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json
index 0a43f6481d86d..ca701cc81b0c7 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json
@@ -1,7 +1,7 @@
 {
   "indices.get":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/index-apis/get-index/",
       "description":"Returns information about one or more indices."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_stream.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_stream.json
index ce19186bea6a9..99f419e593a6f 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_stream.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_stream.json
@@ -1,7 +1,7 @@
 {
   "indices.get_data_stream":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html",
+      "url":"https://opensearch.org/docs/latest/im-plugin/data-streams/",
       "description":"Returns data streams."
}, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json index 0e71b6d395777..ef627502f5250 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json @@ -1,7 +1,7 @@ { "indices.get_field_mapping":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/put-mapping/", "description":"Returns mapping for one or more fields." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json index fbd03f99d2547..3d10894a787f9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json @@ -1,7 +1,7 @@ { "indices.get_index_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates", "description":"Returns an index template." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json index 321bfaba4f941..5f401872c98c4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json @@ -1,7 +1,7 @@ { "indices.get_mapping":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/put-mapping/", "description":"Returns mappings for one or more indices." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json index 1bdaea01f87bf..b3db1ca1cbf90 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json @@ -1,7 +1,7 @@ { "indices.get_settings":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/get-settings/", "description":"Returns settings for one or more indices." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json index 52aeb17913db4..7df19812f9d6b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json @@ -1,7 +1,7 @@ { "indices.get_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates", "description":"Returns an index template." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.open.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.open.json index f44fb04102a7f..e7fe207a254f6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.open.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.open.json @@ -1,7 +1,7 @@ { "indices.open":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/open-index/", "description":"Opens an index." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json index 00767afbaec04..c3ccd25da9f86 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json @@ -1,7 +1,7 @@ { "indices.put_alias":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/alias/", "description":"Creates or updates an alias." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_index_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_index_template.json index a2ceb259a4376..439f96943db04 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_index_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_index_template.json @@ -1,7 +1,7 @@ { "indices.put_index_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates", "description":"Creates or updates an index template." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json index c8b63d4e1cee1..2db333dc1e8ef 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json @@ -1,7 +1,7 @@ { "indices.put_mapping":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/put-mapping/", "description":"Updates the index mappings." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json index 3b1c230178bb8..bdedd5519076d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json @@ -1,7 +1,7 @@ { "indices.put_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates", "description":"Creates or updates an index template." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json index a20014a1444ec..941ea1127954d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json @@ -1,7 +1,7 @@ { "indices.shrink":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/shrink-index/", "description":"Allow to shrink an existing index into a new index with fewer primary shards." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_index_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_index_template.json index 0e42ba6028a9f..1ddb4e54d8bbd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_index_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_index_template.json @@ -1,7 +1,7 @@ { "indices.simulate_index_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates", "description": "Simulate matching the given index name against the index templates in the system" }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_template.json index 65b555082c3b1..04a33fe62e1ec 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_template.json @@ -1,7 +1,7 @@ { "indices.simulate_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates", "description": "Simulate resolving the given template name or body" }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json index d399bf9dbdb8a..af041f426b644 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json @@ -1,7 +1,7 @@ { "indices.split":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/split/", "description":"Allows you to split an existing index into a new index with more primary shards." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json index 382bb9efde0ff..71ce6dbd443f0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json @@ -1,7 +1,7 @@ { "indices.stats":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/stats/", "description":"Provides statistics on operations happening in an index." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json index c31cb8fe59c0f..467cc444b6e53 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json @@ -1,7 +1,7 @@ { "indices.update_aliases":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/alias/", "description":"Updates index aliases." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json index 3e40136f556fa..2e615ece82c64 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json @@ -1,7 +1,7 @@ { "ingest.delete_pipeline":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html", + "url":"https://opensearch.org/docs/latest/ingest-pipelines/delete-ingest/", "description":"Deletes a pipeline." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json index cde980e67c8c9..498aac4ca5a4a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json @@ -1,7 +1,7 @@ { "ingest.get_pipeline":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html", + "url":"https://opensearch.org/docs/latest/ingest-pipelines/get-ingest/", "description":"Returns a pipeline." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json index 5475905e7b99f..0043a32a55310 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json @@ -1,7 +1,7 @@ { "ingest.put_pipeline":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/put-pipeline-api.html", + "url":"https://opensearch.org/docs/latest/ingest-pipelines/create-ingest/", "description":"Creates or updates a pipeline." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json index 8122f7a0ffa19..6f6b153195255 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json @@ -1,7 +1,7 @@ { "ingest.simulate":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html", + "url":"https://opensearch.org/docs/latest/ingest-pipelines/simulate-ingest/", "description":"Allows to simulate a pipeline with example documents." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json index 3a3a6ebe1bff5..e4b1c2a798a41 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json @@ -1,7 +1,7 @@ { "msearch":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html", + "url":"https://opensearch.org/docs/latest/api-reference/multi-search/", "description":"Allows to execute several search operations in one request." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json index 7ac194f91bf56..42aa8a23b1510 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json @@ -1,7 +1,7 @@ { "msearch_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html", + "url":"https://opensearch.org/docs/latest/api-reference/search-template/", "description":"Allows to execute several search template operations in one request." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json index 0830344dc4ad4..bbbe79b2693c2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json @@ -1,7 +1,7 @@ { "nodes.hot_threads":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-hot-threads.html", + "url":"https://opensearch.org/docs/latest/api-reference/nodes-apis/nodes-hot-threads/", "description":"Returns information about hot threads on each node in the cluster." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json index 37279edd3106f..4dce8ec54635a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json @@ -1,7 +1,7 @@ { "nodes.info":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-info.html", + "url":"https://opensearch.org/docs/latest/api-reference/nodes-apis/nodes-info/", "description":"Returns information about nodes in the cluster." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json index 25dc72b6cc037..6ecbf96c1925b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json @@ -1,7 +1,7 @@ { "nodes.reload_secure_settings":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/secure-settings.html#reloadable-secure-settings", + "url":"https://opensearch.org/docs/latest/api-reference/nodes-apis/nodes-reload-secure/", "description":"Reloads secure settings." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json index cc1a9e8185093..a23b2e5428fb6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json @@ -1,7 +1,7 @@ { "nodes.stats":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-stats.html", + "url":"https://opensearch.org/docs/latest/api-reference/nodes-apis/nodes-stats/", "description":"Returns statistical information about nodes in the cluster." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.usage.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.usage.json index 5acbf7a51116c..e23011ce432a8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.usage.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.usage.json @@ -1,7 +1,7 @@ { "nodes.usage":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-usage.html", + "url":"https://opensearch.org/docs/latest/api-reference/nodes-apis/nodes-usage/", "description":"Returns low-level information about REST actions usage on nodes." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json b/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json index c8413d1476402..1c13ea41e3470 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json @@ -1,7 +1,7 @@ { "put_script":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html", + "url":"https://opensearch.org/docs/latest/api-reference/nodes-apis/nodes-usage/", "description":"Creates or updates a script." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json b/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json index eadf240192394..04d337a45ec3e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json @@ -1,7 +1,7 @@ { "rank_eval":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval.html", + "url":"https://opensearch.org/docs/latest/api-reference/rank-eval/", "description":"Allows to evaluate the quality of ranked search results over a set of typical search queries" }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json index 2fbaf86cab616..e2cb76d9ab8ca 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json @@ -1,7 +1,7 @@ { "reindex":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html", + "url":"https://opensearch.org/docs/latest/im-plugin/reindex-data/", "description":"Allows to copy documents from one index to another, optionally filtering the source\ndocuments by a query, changing the destination index settings, or fetching the\ndocuments from a remote cluster." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json index d91365b3c49a5..eafd1122f21c2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json @@ -1,7 +1,7 @@ { "reindex_rethrottle":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html", + "url":"https://opensearch.org/docs/latest/im-plugin/reindex-data/", "description":"Changes the number of requests per second for a particular Reindex operation." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.restore.json b/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.restore.json index 6af49f75b9f6e..4d8efb9411bff 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.restore.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.restore.json @@ -1,7 +1,7 @@ { "remote_store.restore":{ "documentation":{ - "url": "https://opensearch.org/docs/latest/opensearch/rest-api/remote-store#restore", + "url": "https://opensearch.org/docs/latest/tuning-your-cluster/availability-and-recovery/remote-store/index/#restoring-from-a-backup", "description":"Restores from remote store." 
}, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.stats.json index 437a4439bbcb5..5b456ca35d77a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.stats.json @@ -1,7 +1,7 @@ { "remote_store.stats":{ "documentation":{ - "url": "https://opensearch.org/docs/latest/tuning-your-cluster/availability-and-recovery/remote", + "url": "https://opensearch.org/docs/latest/tuning-your-cluster/availability-and-recovery/remote-store/remote-store-stats-api/", "description":"Stats for remote store." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json index c2c474edd9853..a16839aa83a7d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json @@ -1,7 +1,7 @@ { "render_search_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html#_validating_templates", + "url":"https://opensearch.org/docs/latest/api-reference/search-template/", "description":"Allows to use the Mustache language to pre-render a search definition." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/scripts_painless_execute.json b/rest-api-spec/src/main/resources/rest-api-spec/api/scripts_painless_execute.json index 9f761fb452ba1..168620c0d1f3c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/scripts_painless_execute.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/scripts_painless_execute.json @@ -1,7 +1,7 @@ { "scripts_painless_execute":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html", + "url":"https://opensearch.org/docs/latest/api-reference/script-apis/exec-stored-script/", "description":"Allows an arbitrary script to be executed and a result to be returned" }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json b/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json index ea0cbe2675325..3a4987ff2f6f4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json @@ -1,7 +1,7 @@ { "scroll":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#request-body-search-scroll", + "url":"https://opensearch.org/docs/latest/api-reference/scroll/", "description":"Allows to retrieve a large numbers of results from a single search request." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index e78d49a67a98a..01120eb07250d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -1,7 +1,7 @@ { "search":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html", + "url":"https://opensearch.org/docs/latest/api-reference/search/", "description":"Returns results matching a query." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.delete.json index 1fa7060b974dc..260bf482822cc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.delete.json @@ -2,7 +2,7 @@ "search_pipeline.delete": { "documentation": { "description": "Deletes a search pipeline.", - "url": "https://opensearch.org/docs/latest/opensearch/rest-api/search_pipelines/" + "url": "https://opensearch.org/docs/latest/search-plugins/search-pipelines/index/" }, "stability": "stable", "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.get.json index 7cac6e7aa4bcf..05aec5d5de860 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.get.json @@ -2,7 +2,7 @@ "search_pipeline.get": { "documentation": { "description": "Returns a search pipeline", - "url": "https://opensearch.org/docs/latest/opensearch/rest-api/search_pipelines/" + "url": "https://opensearch.org/docs/latest/search-plugins/search-pipelines/retrieving-search-pipeline/" }, "stability": "stable", "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.put.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.put.json index b7375d36825a2..90d4baea4ecf2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.put.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.put.json @@ -2,7 +2,7 @@ "search_pipeline.put": { "documentation": { "description": "Creates or updates a search pipeline.", - "url": "https://opensearch.org/docs/latest/opensearch/rest-api/search_pipelines/" + "url": "https://opensearch.org/docs/latest/search-plugins/search-pipelines/creating-search-pipeline/" }, "stability": "stable", "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json index 4230b660523b8..00fa06bb96c7e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json @@ -1,7 +1,7 @@ { "search_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html", + "url":"https://opensearch.org/docs/latest/api-reference/search-template/", "description":"Allows to use the Mustache language to pre-render a search definition." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json index 05eb3309b11e6..826660ce58dea 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json @@ -1,7 +1,7 @@ { "snapshot.cleanup_repository": { "documentation": { - "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/clean-up-snapshot-repo-api.html", + "url": "https://opensearch.org/docs/latest/api-reference/snapshots/index/", "description": "Removes stale data from repository." 
}, "stability": "stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.clone.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.clone.json index c79460fc30a48..ae83d9fa6497a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.clone.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.clone.json @@ -1,7 +1,7 @@ { "snapshot.clone":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/index/", "description":"Clones indices from one snapshot into another snapshot in the same repository." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json index 64aaeaef9d897..9b4abd1b41a93 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json @@ -1,7 +1,7 @@ { "snapshot.create":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/create-snapshot/", "description":"Creates a snapshot in a repository." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json index 4965162bcd86c..02f7350495cfc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json @@ -1,7 +1,7 @@ { "snapshot.create_repository":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/create-repository/", "description":"Creates a repository." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json index 2e21a08219942..205872dfa95ea 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json @@ -1,7 +1,7 @@ { "snapshot.delete":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/delete-snapshot/", "description":"Deletes a snapshot." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json index 3fc22f969784c..61a8c2b5f8086 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json @@ -1,7 +1,7 @@ { "snapshot.delete_repository":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/delete-snapshot-repository/", "description":"Deletes a repository." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json index e084a997a61b1..41b7d728da63f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json @@ -1,7 +1,7 @@ { "snapshot.get":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/get-snapshot/", "description":"Returns information about a snapshot." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json index cf03bab18c03f..dc3e4a91d2a77 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json @@ -1,7 +1,7 @@ { "snapshot.get_repository":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/get-snapshot-repository/", "description":"Returns information about a repository." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json index 07148c7d261f4..401f612a33203 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json @@ -1,7 +1,7 @@ { "snapshot.restore":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/restore-snapshot/", "description":"Restores a snapshot." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json index 4f22c24fd9a56..1ac6042941013 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json @@ -1,7 +1,7 @@ { "snapshot.status":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/get-snapshot-status/", "description":"Returns information about the status of a snapshot." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json index 865eb15d11310..fc0fd400f8bbd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json @@ -1,7 +1,7 @@ { "snapshot.verify_repository":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/verify-snapshot-repository/", "description":"Verifies a repository." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json index 32f90abab60f6..70dc99d7eb2da 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json @@ -1,7 +1,7 @@ { "tasks.cancel":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", + "url":"https://opensearch.org/docs/latest/api-reference/tasks/", "description":"Cancels a task, if it can be cancelled through an API." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json index 63646ae539de5..d10fe62aa84b8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json @@ -1,7 +1,7 @@ { "tasks.get":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", + "url":"https://opensearch.org/docs/latest/api-reference/tasks/", "description":"Returns information about a task." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json index 7137114c96bff..20a6cb5f7f7d6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json @@ -1,7 +1,7 @@ { "tasks.list":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", + "url":"https://opensearch.org/docs/latest/api-reference/tasks/", "description":"Returns a list of tasks." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index c8d1ed435756b..131036486e024 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -1,7 +1,7 @@ { "update":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update.html", + "url":"https://opensearch.org/docs/latest/api-reference/document-apis/update-document/", "description":"Updates a document with a script or partial document." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json index 71a0c1fc8ad95..7f6cb797da861 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json @@ -1,7 +1,7 @@ { "update_by_query":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html", + "url":"https://opensearch.org/docs/latest/api-reference/document-apis/update-by-query/", "description":"Performs an update on every document in the index without changing the source,\nfor example to pick up a mapping change." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query_rethrottle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query_rethrottle.json index bd70f6e1231c9..bc266ed7df68f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query_rethrottle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query_rethrottle.json @@ -1,7 +1,7 @@ { "update_by_query_rethrottle":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html", + "url":"https://opensearch.org/docs/latest/api-reference/document-apis/update-by-query/", "description":"Changes the number of requests per second for a particular Update By Query operation." }, "stability":"stable", diff --git a/server/build.gradle b/server/build.gradle index a10d010c07b00..4481284dcd0e6 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -36,6 +36,7 @@ plugins { id('opensearch.publish') id('opensearch.internal-cluster-test') id('opensearch.optional-dependencies') + id('me.champeau.gradle.japicmp') version '0.4.2' } publishing { @@ -414,3 +415,81 @@ tasks.named("sourcesJar").configure { duplicatesStrategy = DuplicatesStrategy.EXCLUDE } } + +/** Compares the current build against a snapshot build */ +tasks.register("japicmp", me.champeau.gradle.japicmp.JapicmpTask) { + oldClasspath.from(files("${buildDir}/snapshot/opensearch-${version}.jar")) + newClasspath.from(tasks.named('jar')) + onlyModified = true + failOnModification = true + ignoreMissingClasses = true + annotationIncludes = ['@org.opensearch.common.annotation.PublicApi', '@org.opensearch.common.annotation.DeprecatedApi'] + txtOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.txt") + htmlOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.html") + dependsOn downloadSnapshot +} + +/** If the Java API Comparison task failed, print a hint if the change should be merged from its target branch */ +gradle.taskGraph.afterTask { Task task, TaskState state -> + if (task.name == 'japicmp' && state.failure != null) { + def sha = getGitShaFromJar("${buildDir}/snapshot/opensearch-${version}.jar") + logger.info("Incompatiable java api from snapshot jar built off of commit ${sha}") + + if (!inHistory(sha)) { + logger.warn('\u001B[33mPlease merge from the target branch and run this task again.\u001B[0m') + } + } +} + +/** Downloads latest snapshot from maven repository */ +tasks.register("downloadSnapshot", Copy) { + def mavenSnapshotRepoUrl = "https://aws.oss.sonatype.org/content/repositories/snapshots/" + def groupId = "org.opensearch" + def artifactId = "opensearch" + + repositories { + maven { + url mavenSnapshotRepoUrl + } + } + + configurations { + snapshotArtifact + } + + dependencies { + snapshotArtifact("${groupId}:${artifactId}:${version}:") + } + + from configurations.snapshotArtifact + into "$buildDir/snapshot" +} + +/** Check if the sha is in the current history */ +def inHistory(String sha) { + try { + def commandCheckSha = "git merge-base --is-ancestor ${sha} HEAD" + commandCheckSha.execute() + return true + } catch (Exception) { + return false + } +} + +/** Extracts the Git SHA used to build a jar from its manifest */ +def getGitShaFromJar(String jarPath) { + def sha = '' + try { + // Open the JAR file + def jarFile = new java.util.jar.JarFile(jarPath) + // Get the manifest from the JAR file + def manifest = jarFile.manifest + def attributes = manifest.mainAttributes + // 
Assuming the Git SHA is stored under an attribute named 'Git-SHA' + sha = attributes.getValue('Change') + jarFile.close() + } catch (IOException e) { + println "Failed to read the JAR file: $e.message" + } + return sha +} diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayRecoveryTestUtils.java b/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayRecoveryTestUtils.java index 2b6a5b4ee6867..dc157681be6fa 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayRecoveryTestUtils.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayRecoveryTestUtils.java @@ -54,7 +54,7 @@ public static Map prepareRequestMap(String[] indices, ); for (int shardIdNum = 0; shardIdNum < primaryShardCount; shardIdNum++) { final ShardId shardId = new ShardId(index, shardIdNum); - shardIdShardAttributesMap.put(shardId, new ShardAttributes(shardId, customDataPath)); + shardIdShardAttributesMap.put(shardId, new ShardAttributes(customDataPath)); } } return shardIdShardAttributesMap; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java index 30edea6551067..669e24f9fb555 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java @@ -31,6 +31,9 @@ import java.util.stream.Collectors; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; +import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE; +import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.PREFER_PRIMARY_SHARD_REBALANCE; +import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.PRIMARY_SHARD_REBALANCE_BUFFER; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -58,6 +61,20 @@ public void enablePreferPrimaryBalance() { ); } + public void setAllocationRelocationStrategy(boolean preferPrimaryBalance, boolean preferPrimaryRebalance, float buffer) { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(PREFER_PRIMARY_SHARD_BALANCE.getKey(), preferPrimaryBalance) + .put(PREFER_PRIMARY_SHARD_REBALANCE.getKey(), preferPrimaryRebalance) + .put(PRIMARY_SHARD_REBALANCE_BUFFER.getKey(), buffer) + ) + ); + } + /** * This test verifies that the overall primary balance is attained during allocation. This test verifies primary * balance per index and across all indices is maintained. @@ -87,7 +104,7 @@ public void testGlobalPrimaryAllocation() throws Exception { state = client().admin().cluster().prepareState().execute().actionGet().getState(); logger.info(ShardAllocations.printShardDistribution(state)); verifyPerIndexPrimaryBalance(); - verifyPrimaryBalance(); + verifyPrimaryBalance(0.0f); } /** @@ -224,6 +241,70 @@ public void testAllocationWithDisruption() throws Exception { verifyPerIndexPrimaryBalance(); } + /** + * Similar to testSingleIndexShardAllocation test but creates multiple indices, multiple nodes adding in and getting + * removed. 
The test asserts post each such event that primary shard distribution is balanced for each index as well as across the nodes + * when the PREFER_PRIMARY_SHARD_REBALANCE is set to true + */ + public void testAllocationAndRebalanceWithDisruption() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final int maxReplicaCount = 2; + final int maxShardCount = 2; + // Create higher number of nodes than number of shards to reduce chances of SameShardAllocationDecider kicking-in + // and preventing primary relocations + final int nodeCount = randomIntBetween(5, 10); + final int numberOfIndices = randomIntBetween(1, 10); + final float buffer = randomIntBetween(1, 4) * 0.10f; + + logger.info("--> Creating {} nodes", nodeCount); + final List nodeNames = new ArrayList<>(); + for (int i = 0; i < nodeCount; i++) { + nodeNames.add(internalCluster().startNode()); + } + setAllocationRelocationStrategy(true, true, buffer); + + int shardCount, replicaCount; + ClusterState state; + for (int i = 0; i < numberOfIndices; i++) { + shardCount = randomIntBetween(1, maxShardCount); + replicaCount = randomIntBetween(1, maxReplicaCount); + logger.info("--> Creating index test{} with primary {} and replica {}", i, shardCount, replicaCount); + createIndex("test" + i, shardCount, replicaCount, i % 2 == 0); + ensureGreen(TimeValue.timeValueSeconds(60)); + if (logger.isTraceEnabled()) { + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + } + } + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + verifyPerIndexPrimaryBalance(); + verifyPrimaryBalance(buffer); + + final int additionalNodeCount = randomIntBetween(1, 5); + logger.info("--> Adding {} nodes", additionalNodeCount); + + internalCluster().startNodes(additionalNodeCount); + ensureGreen(TimeValue.timeValueSeconds(60)); + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + verifyPerIndexPrimaryBalance(); + verifyPrimaryBalance(buffer); + + int nodeCountToStop = additionalNodeCount; + while (nodeCountToStop > 0) { + internalCluster().stopRandomDataNode(); + // give replica a chance to promote as primary before terminating node containing the replica + ensureGreen(TimeValue.timeValueSeconds(60)); + nodeCountToStop--; + } + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info("--> Cluster state post nodes stop {}", state); + logger.info(ShardAllocations.printShardDistribution(state)); + verifyPerIndexPrimaryBalance(); + verifyPrimaryBalance(buffer); + } + /** * Utility method which ensures cluster has balanced primary shard distribution across a single index. 
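For context on the japicmp gate added to server/build.gradle above: because of the annotationIncludes filter, only classes explicitly annotated with @PublicApi or @DeprecatedApi are compared against the published snapshot jar. The sketch below illustrates the kind of type the check guards; the ExampleClient class is hypothetical, and the since attribute on @PublicApi is assumed from how the annotation is commonly applied in the codebase.

```java
import org.opensearch.common.annotation.PublicApi;

// Hypothetical type used only to illustrate the japicmp gate: because it carries
// @PublicApi, it falls under the annotationIncludes filter configured above.
@PublicApi(since = "2.13.0")
public class ExampleClient {

    // Removing this method, changing its signature, or narrowing its visibility
    // relative to the downloaded snapshot jar would fail the japicmp task and be
    // listed in server/build/reports/java-compatibility/report.txt.
    public String ping() {
        return "pong";
    }
}
```

Locally, the same check the detect-breaking-change workflow performs can be reproduced by running the japicmp task in the server project (the workflow invokes it through gradle-build-action with Gradle 8.7 and build-root-directory set to server).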
* @throws Exception exception @@ -263,7 +344,7 @@ private void verifyPerIndexPrimaryBalance() throws Exception { }, 60, TimeUnit.SECONDS); } - private void verifyPrimaryBalance() throws Exception { + private void verifyPrimaryBalance(float buffer) throws Exception { assertBusy(() -> { final ClusterState currentState = client().admin().cluster().prepareState().execute().actionGet().getState(); RoutingNodes nodes = currentState.getRoutingNodes(); @@ -278,7 +359,7 @@ private void verifyPrimaryBalance() throws Exception { .filter(ShardRouting::primary) .collect(Collectors.toList()) .size(); - assertTrue(primaryCount <= avgPrimaryShardsPerNode); + assertTrue(primaryCount <= (avgPrimaryShardsPerNode * (1 + buffer))); } }, 60, TimeUnit.SECONDS); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationAllocationDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationAllocationDeciderIT.java new file mode 100644 index 0000000000000..de425ffc63816 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationAllocationDeciderIT.java @@ -0,0 +1,130 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotemigration; + +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.util.List; + +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteMigrationAllocationDeciderIT extends MigrationBaseTestCase { + + // When the primary is on doc rep node, existing replica copy can get allocated on excluded docrep node. 
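One clarification on the buffer parameter threaded into verifyPrimaryBalance above: instead of requiring every node to hold at most the exact cluster-wide average of primaries, the assertion now tolerates up to avg * (1 + buffer) primaries per node, matching the intent of the PRIMARY_SHARD_REBALANCE_BUFFER setting. Below is a minimal, self-contained sketch of that arithmetic; the class name and numbers are illustrative only, not taken from the test.

```java
public final class PrimaryBalanceBufferDemo {

    // Mirrors the relaxed assertion in verifyPrimaryBalance: a node passes
    // as long as primaryCount <= avgPrimaryShardsPerNode * (1 + buffer).
    static boolean withinBuffer(int primaryCount, double avgPrimaryShardsPerNode, double buffer) {
        return primaryCount <= avgPrimaryShardsPerNode * (1 + buffer);
    }

    public static void main(String[] args) {
        double avg = 30 / 6.0; // e.g. 30 primaries across 6 nodes -> 5 per node on average
        System.out.println(withinBuffer(5, avg, 0.0));  // true: exact balance always passes
        System.out.println(withinBuffer(6, avg, 0.20)); // true: 6 <= 5 * 1.2
        System.out.println(withinBuffer(7, avg, 0.20)); // false: 7 > 5 * 1.2
    }
}
```

With buffer = 0.0f, as testGlobalPrimaryAllocation now passes, this degenerates to the original strict per-node average check.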
+ public void testFilterAllocationSkipsReplica() throws IOException { + addRemote = false; + List<String> docRepNodes = internalCluster().startNodes(3); + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "0") + .build() + ); + ensureGreen("test"); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + assertTrue( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.routing.allocation.exclude._name", String.join(",", docRepNodes))) + .execute() + .actionGet() + .isAcknowledged() + ); + internalCluster().stopRandomDataNode(); + ensureGreen("test"); + } + + // When the primary is on remote node, new replica copy shouldn't get allocated on an excluded docrep node. + public void testFilterAllocationSkipsReplicaOnExcludedNode() throws IOException { + addRemote = false; + List<String> nodes = internalCluster().startNodes(2); + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "0") + .build() + ); + ensureGreen("test"); + addRemote = true; + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + String remoteNode = internalCluster().startNode(); + + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand("test", 0, primaryNodeName("test"), remoteNode)) + .execute() + .actionGet(); + client().admin() + .cluster() + .prepareHealth() + .setTimeout(TimeValue.timeValueSeconds(60)) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .execute() + .actionGet(); + assertEquals(remoteNode, primaryNodeName("test")); + + assertTrue( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.routing.allocation.exclude._name", String.join(",", nodes))) + .execute() + .actionGet() + .isAcknowledged() + ); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNodeName("test"))); + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus() + .setTimeout(TimeValue.timeValueSeconds(2)) + .execute() + .actionGet(); + assertTrue(clusterHealthResponse.isTimedOut()); + ensureYellow("test"); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java new file mode 100644 index 0000000000000..5ae2a976f4066 --- /dev/null +++
b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java @@ -0,0 +1,187 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotemigration; + +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexSettings; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.snapshots.SnapshotState; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; +import java.util.Optional; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.MIXED; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.Direction.REMOTE_STORE; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStoreMigrationSettingsUpdateIT extends RemoteStoreMigrationShardAllocationBaseTestCase { + + private Client client; + + // remote store backed index setting tests + + public void testNewIndexIsRemoteStoreBackedForRemoteStoreDirectionAndMixedMode() { + logger.info("Initialize cluster: gives non remote cluster manager"); + initializeCluster(false); + + String indexName1 = "test_index_1"; + String indexName2 = "test_index_2"; + + logger.info("Add non-remote node"); + addRemote = false; + String nonRemoteNodeName = internalCluster().startNode(); + internalCluster().validateClusterFormed(); + assertNodeInCluster(nonRemoteNodeName); + + logger.info("Create an index"); + prepareIndexWithoutReplica(Optional.of(indexName1)); + + logger.info("Verify that non remote-backed index is created"); + assertNonRemoteStoreBackedIndex(indexName1); + + logger.info("Set mixed cluster compatibility mode and remote_store direction"); + setClusterMode(MIXED.mode); + setDirection(REMOTE_STORE.direction); + + logger.info("Add remote node"); + addRemote = true; + String remoteNodeName = internalCluster().startNode(); + internalCluster().validateClusterFormed(); + assertNodeInCluster(remoteNodeName); + + logger.info("Create another index"); + prepareIndexWithoutReplica(Optional.of(indexName2)); + + logger.info("Verify that remote backed index is created"); + assertRemoteStoreBackedIndex(indexName2); + } + + public void testNewRestoredIndexIsRemoteStoreBackedForRemoteStoreDirectionAndMixedMode() throws Exception { + logger.info("Initialize cluster: gives non remote cluster manager"); + initializeCluster(false); + + logger.info("Add remote and non-remote nodes"); + setClusterMode(MIXED.mode); + addRemote = 
false; + String nonRemoteNodeName = internalCluster().startNode(); + addRemote = true; + String remoteNodeName = internalCluster().startNode(); + internalCluster().validateClusterFormed(); + assertNodeInCluster(nonRemoteNodeName); + assertNodeInCluster(remoteNodeName); + + logger.info("Create a non remote-backed index"); + client.admin() + .indices() + .prepareCreate(TEST_INDEX) + .setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() + ) + .get(); + + logger.info("Verify that non remote store backed index is created"); + assertNonRemoteStoreBackedIndex(TEST_INDEX); + + logger.info("Create repository"); + String snapshotName = "test-snapshot"; + String snapshotRepoName = "test-restore-snapshot-repo"; + Path snapshotRepoNameAbsolutePath = randomRepoPath().toAbsolutePath(); + assertAcked( + clusterAdmin().preparePutRepository(snapshotRepoName) + .setType("fs") + .setSettings(Settings.builder().put("location", snapshotRepoNameAbsolutePath)) + ); + + logger.info("Create snapshot of non remote store backed index"); + + SnapshotInfo snapshotInfo = client().admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName) + .setIndices(TEST_INDEX) + .setWaitForCompletion(true) + .get() + .getSnapshotInfo(); + + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertTrue(snapshotInfo.successfulShards() > 0); + assertEquals(0, snapshotInfo.failedShards()); + + logger.info("Restore index from snapshot under NONE direction"); + String restoredIndexName1 = TEST_INDEX + "-restored1"; + restoreSnapshot(snapshotRepoName, snapshotName, restoredIndexName1); + + logger.info("Verify that restored index is non remote-backed"); + assertNonRemoteStoreBackedIndex(restoredIndexName1); + + logger.info("Restore index from snapshot under REMOTE_STORE direction"); + setDirection(REMOTE_STORE.direction); + String restoredIndexName2 = TEST_INDEX + "-restored2"; + restoreSnapshot(snapshotRepoName, snapshotName, restoredIndexName2); + + logger.info("Verify that restored index is remote-backed"); + assertRemoteStoreBackedIndex(restoredIndexName2); + } + + // restore indices from a snapshot + private void restoreSnapshot(String snapshotRepoName, String snapshotName, String restoredIndexName) { + RestoreSnapshotResponse restoreSnapshotResponse = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName) + .setWaitForCompletion(false) + .setIndices(TEST_INDEX) + .setRenamePattern(TEST_INDEX) + .setRenameReplacement(restoredIndexName) + .get(); + + assertEquals(restoreSnapshotResponse.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName); + } + + // verify that the created index is not remote store backed + private void assertNonRemoteStoreBackedIndex(String indexName) { + Settings indexSettings = client.admin().indices().prepareGetIndex().execute().actionGet().getSettings().get(indexName); + assertEquals(ReplicationType.DOCUMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); + assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); + assertNull(indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY)); + assertNull(indexSettings.get(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY)); + } + + // verify that the created index is remote store backed + private void assertRemoteStoreBackedIndex(String indexName) { + Settings indexSettings = client.admin().indices().prepareGetIndex().execute().actionGet().getSettings().get(indexName); +
assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); + assertEquals("true", indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); + assertEquals(REPOSITORY_NAME, indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY)); + assertEquals(REPOSITORY_2_NAME, indexSettings.get(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY)); + assertEquals( + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(indexSettings) + ); + } + + // bootstrap a cluster + private void initializeCluster(boolean remoteClusterManager) { + addRemote = remoteClusterManager; + internalCluster().startClusterManagerOnlyNode(); + client = internalCluster().client(); + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationShardAllocationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationShardAllocationBaseTestCase.java new file mode 100644 index 0000000000000..ad2302d1ab2e1 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationShardAllocationBaseTestCase.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotemigration; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.settings.Settings; + +import java.util.Map; +import java.util.Optional; + +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +public class RemoteStoreMigrationShardAllocationBaseTestCase extends MigrationBaseTestCase { + protected static final String TEST_INDEX = "test_index"; + protected static final String NAME = "remote_store_migration"; + + protected final ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + + // set the compatibility mode of cluster [strict, mixed] + protected void setClusterMode(String mode) { + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), mode)); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + } + + // set the migration direction for cluster [remote_store, docrep, none] + public void setDirection(String direction) { + updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), direction)); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + } + + // verify that the given nodeName exists in cluster + protected DiscoveryNode assertNodeInCluster(String nodeName) { + Map<String, DiscoveryNode> nodes = internalCluster().client().admin().cluster().prepareState().get().getState().nodes().getNodes(); + DiscoveryNode discoveryNode = null; + for (Map.Entry<String, DiscoveryNode> entry : nodes.entrySet()) { + DiscoveryNode node = entry.getValue(); + if
(node.getName().equals(nodeName)) { + discoveryNode = node; + break; + } + } + assertNotNull(discoveryNode); + return discoveryNode; + } + + // returns a comma-separated list of node names excluding `except` + protected String allNodesExcept(String except) { + StringBuilder exclude = new StringBuilder(); + DiscoveryNodes allNodes = internalCluster().client().admin().cluster().prepareState().get().getState().nodes(); + for (DiscoveryNode node : allNodes) { + if (node.getName().equals(except) == false) { + exclude.append(node.getName()).append(","); + } + } + return exclude.toString(); + } + + // create a new test index + protected void prepareIndexWithoutReplica(Optional<String> name) { + String indexName = name.orElse(TEST_INDEX); + internalCluster().client() + .admin() + .indices() + .prepareCreate(indexName) + .setSettings( + Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.routing.allocation.exclude._name", allNodesExcept(null)) + ) + .execute() + .actionGet(); + } + + protected ShardRouting getShardRouting(boolean isPrimary) { + IndexShardRoutingTable table = internalCluster().client() + .admin() + .cluster() + .prepareState() + .execute() + .actionGet() + .getState() + .getRoutingTable() + .index(TEST_INDEX) + .shard(0); + return (isPrimary ? table.primaryShard() : table.replicaShards().get(0)); + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java index a31d203058565..640b83f194c1c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java @@ -19,7 +19,6 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; -import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/ResizeIndexMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/ResizeIndexMigrationTestCase.java new file mode 100644 index 0000000000000..b57bc60c50e8c --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/ResizeIndexMigrationTestCase.java @@ -0,0 +1,217 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.remotemigration; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.indices.shrink.ResizeType; +import org.opensearch.action.support.ActiveShardCount; +import org.opensearch.common.settings.Settings; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.OpenSearchIntegTestCase; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class ResizeIndexMigrationTestCase extends MigrationBaseTestCase { + private static final String TEST_INDEX = "test_index"; + private final static String REMOTE_STORE_DIRECTION = "remote_store"; + private final static String DOC_REP_DIRECTION = "docrep"; + private final static String MIXED_MODE = "mixed"; + + /* + * This test will verify the resize request failure, when cluster mode is mixed + * and index is on DocRep node, and migration to remote store is in progress. + * */ + public void testFailResizeIndexWhileDocRepToRemoteStoreMigration() throws Exception { + addRemote = false; + // create a docrep cluster + internalCluster().startClusterManagerOnlyNode(); + internalCluster().validateClusterFormed(); + + // add a non-remote node + String nonRemoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + + logger.info("-->Create index on non-remote node and SETTING_REMOTE_STORE_ENABLED is false. 
Resize should not happen"); + Settings.Builder builder = Settings.builder().put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); + internalCluster().client() + .admin() + .indices() + .prepareCreate(TEST_INDEX) + .setSettings( + builder.put("index.number_of_shards", 10) + .put("index.number_of_replicas", 0) + .put("index.routing.allocation.include._name", nonRemoteNodeName) + ) + .setWaitForActiveShards(ActiveShardCount.ALL) + .execute() + .actionGet(); + + // set mixed mode + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), MIXED_MODE)); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // add a remote node + addRemote = true; + String remoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + + // set remote store migration direction + updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), REMOTE_STORE_DIRECTION)); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + ResizeType resizeType; + int resizeShardsNum; + String cause; + switch (randomIntBetween(0, 2)) { + case 0: + resizeType = ResizeType.SHRINK; + resizeShardsNum = 5; + cause = "shrink_index"; + break; + case 1: + resizeType = ResizeType.SPLIT; + resizeShardsNum = 20; + cause = "split_index"; + break; + default: + resizeType = ResizeType.CLONE; + resizeShardsNum = 10; + cause = "clone_index"; + } + + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings(TEST_INDEX) + .setSettings(Settings.builder().put("index.blocks.write", true)) + .execute() + .actionGet(); + + ensureGreen(TEST_INDEX); + + Settings.Builder resizeSettingsBuilder = Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", resizeShardsNum) + .putNull("index.blocks.write"); + + IllegalStateException ex = expectThrows( + IllegalStateException.class, + () -> internalCluster().client() + .admin() + .indices() + .prepareResizeIndex(TEST_INDEX, "first_split") + .setResizeType(resizeType) + .setSettings(resizeSettingsBuilder.build()) + .get() + ); + assertEquals( + ex.getMessage(), + "Index " + resizeType + " is not allowed as remote migration mode is mixed" + " and index is remote store disabled" + ); + } + + /* + * This test will verify the resize request failure, when cluster mode is mixed + * and index is on Remote Store node, and migration to DocRep node is in progress. + * */ + public void testFailResizeIndexWhileRemoteStoreToDocRepMigration() throws Exception { + // creates a remote cluster + addRemote = true; + internalCluster().startClusterManagerOnlyNode(); + internalCluster().validateClusterFormed(); + + // add a remote node + String remoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + + logger.info("--> Create index on remote node and SETTING_REMOTE_STORE_ENABLED is true. 
Resize should not happen"); + Settings.Builder builder = Settings.builder().put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); + internalCluster().client() + .admin() + .indices() + .prepareCreate(TEST_INDEX) + .setSettings( + builder.put("index.number_of_shards", 10) + .put("index.number_of_replicas", 0) + .put("index.routing.allocation.include._name", remoteNodeName) + ) + .setWaitForActiveShards(ActiveShardCount.ALL) + .execute() + .actionGet(); + + // set mixed mode + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), MIXED_MODE)); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // add a non-remote node + addRemote = false; + String nonRemoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + + // set docrep migration direction + updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), DOC_REP_DIRECTION)); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + ResizeType resizeType; + int resizeShardsNum; + String cause; + switch (randomIntBetween(0, 2)) { + case 0: + resizeType = ResizeType.SHRINK; + resizeShardsNum = 5; + cause = "shrink_index"; + break; + case 1: + resizeType = ResizeType.SPLIT; + resizeShardsNum = 20; + cause = "split_index"; + break; + default: + resizeType = ResizeType.CLONE; + resizeShardsNum = 10; + cause = "clone_index"; + } + + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings(TEST_INDEX) + .setSettings(Settings.builder().put("index.blocks.write", true)) + .execute() + .actionGet(); + + ensureGreen(TEST_INDEX); + + Settings.Builder resizeSettingsBuilder = Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", resizeShardsNum) + .putNull("index.blocks.write"); + + IllegalStateException ex = expectThrows( + IllegalStateException.class, + () -> internalCluster().client() + .admin() + .indices() + .prepareResizeIndex(TEST_INDEX, "first_split") + .setResizeType(resizeType) + .setSettings(resizeSettingsBuilder.build()) + .get() + ); + assertEquals( + ex.getMessage(), + "Index " + resizeType + " is not allowed as remote migration mode is mixed" + " and index is remote store enabled" + ); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 21ce4be9981fb..255b17b964663 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -12,8 +12,6 @@ import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; -import org.opensearch.action.admin.indices.get.GetIndexRequest; -import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.delete.DeleteResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Client; @@ -31,6 +29,7 @@ import org.opensearch.indices.replication.common.ReplicationType; 
import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.snapshots.SnapshotRestoreException; import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; @@ -48,7 +47,6 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -107,7 +105,7 @@ private void assertDocsPresentInIndex(Client client, String indexName, int numOf } } - public void testRestoreOperationsShallowCopyEnabled() throws IOException, ExecutionException, InterruptedException { + public void testRestoreOperationsShallowCopyEnabled() throws Exception { String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); String primary = internalCluster().startDataOnlyNode(); String indexName1 = "testindex1"; @@ -118,8 +116,6 @@ public void testRestoreOperationsShallowCopyEnabled() throws IOException, Execut Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); String restoredIndexName1 = indexName1 + "-restored"; - String restoredIndexName1Seg = indexName1 + "-restored-seg"; - String restoredIndexName1Doc = indexName1 + "-restored-doc"; String restoredIndexName2 = indexName2 + "-restored"; createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); @@ -201,60 +197,6 @@ public void testRestoreOperationsShallowCopyEnabled() throws IOException, Execut indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); ensureGreen(restoredIndexName1); assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); - - // restore index as seg rep enabled with remote store and remote translog disabled - RestoreSnapshotResponse restoreSnapshotResponse3 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(false) - .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1Seg) - .get(); - assertEquals(restoreSnapshotResponse3.status(), RestStatus.ACCEPTED); - ensureGreen(restoredIndexName1Seg); - - GetIndexResponse getIndexResponse = client.admin() - .indices() - .getIndex(new GetIndexRequest().indices(restoredIndexName1Seg).includeDefaults(true)) - .get(); - indexSettings = getIndexResponse.settings().get(restoredIndexName1Seg); - assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); - assertNull(indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, null)); - assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); - assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1); - // indexing some new docs and validating - indexDocuments(client, restoredIndexName1Seg, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1Seg); - assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1 + 2); - - // restore index as doc rep based from shallow copy snapshot - RestoreSnapshotResponse restoreSnapshotResponse4 = 
client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(false) - .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, IndexMetadata.SETTING_REPLICATION_TYPE) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1Doc) - .get(); - assertEquals(restoreSnapshotResponse4.status(), RestStatus.ACCEPTED); - ensureGreen(restoredIndexName1Doc); - - getIndexResponse = client.admin() - .indices() - .getIndex(new GetIndexRequest().indices(restoredIndexName1Doc).includeDefaults(true)) - .get(); - indexSettings = getIndexResponse.settings().get(restoredIndexName1Doc); - assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); - assertNull(indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, null)); - assertNull(indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); - assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1); - // indexing some new docs and validating - indexDocuments(client, restoredIndexName1Doc, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1Doc); - assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1 + 2); } public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { @@ -448,83 +390,6 @@ protected IndexShard getIndexShard(String node, String indexName) { return shardId.map(indexService::getShard).orElse(null); } - public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException { - String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); - String primary = internalCluster().startDataOnlyNode(); - String indexName1 = "testindex1"; - String indexName2 = "testindex2"; - String snapshotRepoName = "test-restore-snapshot-repo"; - String remoteStoreRepo2Name = "test-rs-repo-2" + TEST_REMOTE_STORE_REPO_SUFFIX; - String snapshotName1 = "test-restore-snapshot1"; - Path absolutePath1 = randomRepoPath().toAbsolutePath(); - Path absolutePath3 = randomRepoPath().toAbsolutePath(); - String restoredIndexName1 = indexName1 + "-restored"; - - createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); - createRepository(remoteStoreRepo2Name, "fs", absolutePath3); - - Client client = client(); - Settings indexSettings = getIndexSettings(1, 0).build(); - createIndex(indexName1, indexSettings); - - Settings indexSettings2 = getIndexSettings(1, 0).build(); - createIndex(indexName2, indexSettings2); - - final int numDocsInIndex1 = 5; - final int numDocsInIndex2 = 6; - indexDocuments(client, indexName1, numDocsInIndex1); - indexDocuments(client, indexName2, numDocsInIndex2); - ensureGreen(indexName1, indexName2); - - internalCluster().startDataOnlyNode(); - - logger.info("--> snapshot"); - SnapshotInfo snapshotInfo1 = createSnapshot( - snapshotRepoName, - snapshotName1, - new ArrayList<>(Arrays.asList(indexName1, indexName2)) - ); - assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); - assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); - assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); - - Settings remoteStoreIndexSettings = Settings.builder() - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepo2Name) - .build(); - // restore index as a remote store index with different remote store repo - RestoreSnapshotResponse restoreSnapshotResponse = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(false) - 
.setIndexSettings(remoteStoreIndexSettings) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1) - .get(); - assertEquals(restoreSnapshotResponse.status(), RestStatus.ACCEPTED); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1); - - // deleting data for restoredIndexName1 and restoring from remote store. - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); - // Re-initialize client to make sure we are not using client from stopped node. - client = client(clusterManagerNode); - assertAcked(client.admin().indices().prepareClose(restoredIndexName1)); - client.admin() - .cluster() - .restoreRemoteStore( - new RestoreRemoteStoreRequest().indices(restoredIndexName1).restoreAllShards(true), - PlainActionFuture.newFuture() - ); - ensureYellowAndNoInitializingShards(restoredIndexName1); - ensureGreen(restoredIndexName1); - // indexing some new docs and validating - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); - indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); - } - public void testRestoreShallowSnapshotRepository() throws ExecutionException, InterruptedException { String indexName1 = "testindex1"; String snapshotRepoName = "test-restore-snapshot-repo"; @@ -656,4 +521,98 @@ public void testRestoreShallowSnapshotIndexAfterSnapshot() throws ExecutionExcep assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); } + public void testInvalidRestoreRequestScenarios() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + String index = "test-index"; + String snapshotRepo = "test-restore-snapshot-repo"; + String newRemoteStoreRepo = "test-new-rs-repo"; + String snapshotName1 = "test-restore-snapshot1"; + String snapshotName2 = "test-restore-snapshot2"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + String restoredIndex = index + "-restored"; + + createRepository(snapshotRepo, "fs", getRepositorySettings(absolutePath1, true)); + + Client client = client(); + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(index, indexSettings); + + final int numDocsInIndex = 5; + indexDocuments(client, index, numDocsInIndex); + ensureGreen(index); + + internalCluster().startDataOnlyNode(); + logger.info("--> snapshot"); + + SnapshotInfo snapshotInfo = createSnapshot(snapshotRepo, snapshotName1, new ArrayList<>(List.of(index))); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + + updateRepository(snapshotRepo, "fs", getRepositorySettings(absolutePath1, false)); + SnapshotInfo snapshotInfo2 = createSnapshot(snapshotRepo, snapshotName2, new ArrayList<>(List.of(index))); + assertThat(snapshotInfo2.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo2.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo2.successfulShards(), equalTo(snapshotInfo2.totalShards())); + + DeleteResponse deleteResponse = client().prepareDelete(index, "0").execute().actionGet(); + assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); + indexDocuments(client, index, numDocsInIndex, 
numDocsInIndex + randomIntBetween(2, 5)); + ensureGreen(index); + + // try index restore with remote store disabled + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(SETTING_REMOTE_STORE_ENABLED) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot remove setting [index.remote_store.enabled] on restore")); + + // try index restore with remote store repository modified + Settings remoteStoreIndexSettings = Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, newRemoteStoreRepo) + .build(); + + exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(remoteStoreIndexSettings) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.segment.repository]" + " on restore")); + + // try index restore with remote store repository and translog store repository disabled + exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings( + IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, + IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY + ) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot remove setting [index.remote_store.segment.repository]" + " on restore")); + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index ba90cbe96e157..d7ad0daa43524 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -28,7 +28,6 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.index.Index; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; @@ -57,11 +56,8 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING; 
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -191,121 +187,6 @@ protected BulkResponse indexBulk(String indexName, int numDocs) { return client().bulk(bulkRequest).actionGet(); } - public static Settings remoteStoreClusterSettings(String name, Path path) { - return remoteStoreClusterSettings(name, path, name, path); - } - - public static Settings remoteStoreClusterSettings( - String segmentRepoName, - Path segmentRepoPath, - String segmentRepoType, - String translogRepoName, - Path translogRepoPath, - String translogRepoType - ) { - Settings.Builder settingsBuilder = Settings.builder(); - settingsBuilder.put( - buildRemoteStoreNodeAttributes( - segmentRepoName, - segmentRepoPath, - segmentRepoType, - translogRepoName, - translogRepoPath, - translogRepoType, - false - ) - ); - return settingsBuilder.build(); - } - - public static Settings remoteStoreClusterSettings( - String segmentRepoName, - Path segmentRepoPath, - String translogRepoName, - Path translogRepoPath - ) { - Settings.Builder settingsBuilder = Settings.builder(); - settingsBuilder.put(buildRemoteStoreNodeAttributes(segmentRepoName, segmentRepoPath, translogRepoName, translogRepoPath, false)); - return settingsBuilder.build(); - } - - public static Settings buildRemoteStoreNodeAttributes( - String segmentRepoName, - Path segmentRepoPath, - String translogRepoName, - Path translogRepoPath, - boolean withRateLimiterAttributes - ) { - return buildRemoteStoreNodeAttributes( - segmentRepoName, - segmentRepoPath, - ReloadableFsRepository.TYPE, - translogRepoName, - translogRepoPath, - ReloadableFsRepository.TYPE, - withRateLimiterAttributes - ); - } - - public static Settings buildRemoteStoreNodeAttributes( - String segmentRepoName, - Path segmentRepoPath, - String segmentRepoType, - String translogRepoName, - Path translogRepoPath, - String translogRepoType, - boolean withRateLimiterAttributes - ) { - String segmentRepoTypeAttributeKey = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, - segmentRepoName - ); - String segmentRepoSettingsAttributeKeyPrefix = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, - segmentRepoName - ); - String translogRepoTypeAttributeKey = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, - translogRepoName - ); - String translogRepoSettingsAttributeKeyPrefix = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, - translogRepoName - ); - String stateRepoTypeAttributeKey = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, - segmentRepoName - ); - String stateRepoSettingsAttributeKeyPrefix = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, - segmentRepoName - ); - - Settings.Builder settings = Settings.builder() - .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) - .put(segmentRepoTypeAttributeKey, segmentRepoType) - .put(segmentRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) - .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName) - .put(translogRepoTypeAttributeKey, translogRepoType) - .put(translogRepoSettingsAttributeKeyPrefix + "location", translogRepoPath) - .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) - .put(stateRepoTypeAttributeKey, segmentRepoType) - .put(stateRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath); - - if (withRateLimiterAttributes) { - settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) - .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); - } - - return settings.build(); - } - Settings defaultIndexSettings() { return Settings.builder() .put(super.indexSettings()) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java index 906d45ef84b3f..2ce96092203e8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java @@ -40,6 +40,7 @@ import org.opensearch.common.Numbers; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.document.DocumentField; +import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateUtils; @@ -51,6 +52,7 @@ import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.fielddata.ScriptDocValues; +import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; @@ -189,6 +191,20 @@ protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { scripts.put("doc['s']", vars -> docScript(vars, "s")); scripts.put("doc['ms']", vars -> docScript(vars, "ms")); + scripts.put("doc['keyword_field']", vars -> sourceScript(vars, "keyword_field")); + scripts.put("doc['multi_keyword_field']", vars -> sourceScript(vars, "multi_keyword_field")); + scripts.put("doc['long_field']", vars -> sourceScript(vars, "long_field")); + scripts.put("doc['multi_long_field']", vars -> sourceScript(vars, "multi_long_field")); + scripts.put("doc['double_field']", vars -> sourceScript(vars, "double_field")); + scripts.put("doc['multi_double_field']", vars -> sourceScript(vars, "multi_double_field")); + scripts.put("doc['date_field']", vars -> sourceScript(vars, "date_field")); + scripts.put("doc['multi_date_field']", vars -> sourceScript(vars, "multi_date_field")); + scripts.put("doc['ip_field']", vars -> sourceScript(vars, "ip_field")); + scripts.put("doc['multi_ip_field']", vars -> sourceScript(vars, "multi_ip_field")); + scripts.put("doc['boolean_field']", vars -> sourceScript(vars, "boolean_field")); + scripts.put("doc['geo_field']", vars -> sourceScript(vars, "geo_field")); + scripts.put("doc['multi_geo_field']", vars -> sourceScript(vars, "multi_geo_field")); + + return scripts; } @@ -1299,6 +1315,147 @@ public void testScriptFields() throws Exception { } } + public void testDerivedFields() throws Exception { + assertAcked( + prepareCreate("index").setMapping( + "keyword_field", + "type=keyword", + "multi_keyword_field", + "type=keyword", + "long_field", + "type=long", + "multi_long_field", + "type=long", + "double_field", + "type=double", + "multi_double_field", + "type=double", + "date_field", + "type=date", + "multi_date_field", + "type=date", + "ip_field", + "type=ip", + "multi_ip_field", + "type=ip", + "boolean_field", + "type=boolean", + "geo_field",
"type=geo_point", + "multi_geo_field", + "type=geo_point" + ).get() + ); + final int numDocs = randomIntBetween(3, 8); + List reqs = new ArrayList<>(); + + DateTime date1 = new DateTime(1990, 12, 29, 0, 0, DateTimeZone.UTC); + DateTime date2 = new DateTime(1990, 12, 30, 0, 0, DateTimeZone.UTC); + + for (int i = 0; i < numDocs; ++i) { + reqs.add( + client().prepareIndex("index") + .setId(Integer.toString(i)) + .setSource( + "keyword_field", + Integer.toString(i), + "multi_keyword_field", + new String[] { Integer.toString(i), Integer.toString(i + 1) }, + "long_field", + (long) i, + "multi_long_field", + new long[] { i, i + 1 }, + "double_field", + (double) i, + "multi_double_field", + new double[] { i, i + 1 }, + "date_field", + date1.getMillis(), + "multi_date_field", + new Long[] { date1.getMillis(), date2.getMillis() }, + "ip_field", + "172.16.1.10", + "multi_ip_field", + new String[] { "172.16.1.10", "172.16.1.11" }, + "boolean_field", + true, + "geo_field", + new GeoPoint(12.0, 10.0), + "multi_geo_field", + new GeoPoint[] { new GeoPoint(12.0, 10.0), new GeoPoint(13.0, 10.0) } + ) + ); + } + indexRandom(true, reqs); + indexRandomForConcurrentSearch("index"); + ensureSearchable(); + SearchRequestBuilder req = client().prepareSearch("index"); + String[][] fieldLookup = new String[][] { + { "keyword_field", "keyword" }, + { "multi_keyword_field", "keyword" }, + { "long_field", "long" }, + { "multi_long_field", "long" }, + { "double_field", "double" }, + { "multi_double_field", "double" }, + { "date_field", "date" }, + { "multi_date_field", "date" }, + { "ip_field", "ip" }, + { "multi_ip_field", "ip" }, + { "boolean_field", "boolean" }, + { "geo_field", "geo_point" }, + { "multi_geo_field", "geo_point" } }; + for (String[] field : fieldLookup) { + req.addDerivedField( + "derived_" + field[0], + field[1], + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + field[0] + "']", Collections.emptyMap()) + ); + } + req.addFetchField("derived_*"); + SearchResponse resp = req.get(); + assertSearchResponse(resp); + for (SearchHit hit : resp.getHits().getHits()) { + final int id = Integer.parseInt(hit.getId()); + Map fields = hit.getFields(); + + assertEquals(fields.get("derived_keyword_field").getValues().get(0), Integer.toString(id)); + assertEquals(fields.get("derived_multi_keyword_field").getValues().get(0), Integer.toString(id)); + assertEquals(fields.get("derived_multi_keyword_field").getValues().get(1), Integer.toString(id + 1)); + + assertEquals(fields.get("derived_long_field").getValues().get(0), id); + assertEquals(fields.get("derived_multi_long_field").getValues().get(0), id); + assertEquals(fields.get("derived_multi_long_field").getValues().get(1), (id + 1)); + + assertEquals(fields.get("derived_double_field").getValues().get(0), (double) id); + assertEquals(fields.get("derived_multi_double_field").getValues().get(0), (double) id); + assertEquals(fields.get("derived_multi_double_field").getValues().get(1), (double) (id + 1)); + + assertEquals( + fields.get("derived_date_field").getValues().get(0), + DateFieldMapper.getDefaultDateTimeFormatter().formatJoda(date1) + ); + assertEquals( + fields.get("derived_multi_date_field").getValues().get(0), + DateFieldMapper.getDefaultDateTimeFormatter().formatJoda(date1) + ); + assertEquals( + fields.get("derived_multi_date_field").getValues().get(1), + DateFieldMapper.getDefaultDateTimeFormatter().formatJoda(date2) + ); + + assertEquals(fields.get("derived_ip_field").getValues().get(0), "172.16.1.10"); + 
assertEquals(fields.get("derived_multi_ip_field").getValues().get(0), "172.16.1.10"); + assertEquals(fields.get("derived_multi_ip_field").getValues().get(1), "172.16.1.11"); + + assertEquals(fields.get("derived_boolean_field").getValues().get(0), true); + + assertEquals(fields.get("derived_geo_field").getValues().get(0), new GeoPoint(12.0, 10.0)); + assertEquals(fields.get("derived_multi_geo_field").getValues().get(0), new GeoPoint(12.0, 10.0)); + assertEquals(fields.get("derived_multi_geo_field").getValues().get(1), new GeoPoint(13.0, 10.0)); + + } + } + public void testDocValueFieldsWithFieldAlias() throws Exception { XContentBuilder mapping = XContentFactory.jsonBuilder() .startObject() diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java index b019bb57743c9..df1fc9b833171 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java @@ -112,12 +112,16 @@ public void createSnapshot() { } public RestoreSnapshotResponse restoreSnapshotWithSettings(Settings indexSettings) { + return restoreSnapshotWithSettings(indexSettings, RESTORED_INDEX_NAME); + } + + public RestoreSnapshotResponse restoreSnapshotWithSettings(Settings indexSettings, String restoredIndexName) { RestoreSnapshotRequestBuilder builder = client().admin() .cluster() .prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) .setWaitForCompletion(false) .setRenamePattern(INDEX_NAME) - .setRenameReplacement(RESTORED_INDEX_NAME); + .setRenameReplacement(restoredIndexName); if (indexSettings != null) { builder.setIndexSettings(indexSettings); } @@ -311,7 +315,8 @@ public void testSnapshotRestoreOnIndexWithSegRepClusterSetting() throws Exceptio * 2. Snapshot index * 3. Add new set of nodes with `cluster.indices.replication.strategy` set to SEGMENT and `cluster.index.restrict.replication.type` * set to true. - * 4. Perform restore on new set of nodes to validate restored index has `DOCUMENT` replication. + * 4. Perform restore on new set of nodes to validate restored index has `SEGMENT` replication. + * 5. 
Validate that the restore operation fails if replication type DOCUMENT is passed as a request parameter */ public void testSnapshotRestoreOnRestrictReplicationSetting() throws Exception { final int documentCount = scaledRandomIntBetween(1, 10); @@ -337,9 +342,20 @@ public void testSnapshotRestoreOnRestrictReplicationSetting() throws Exception { createSnapshot(); - // Delete index + RestoreSnapshotResponse restoreSnapshotResponse = restoreSnapshotWithSettings(restoreIndexSegRepSettings(), RESTORED_INDEX_NAME); + assertEquals(restoreSnapshotResponse.status(), RestStatus.ACCEPTED); + ensureGreen(RESTORED_INDEX_NAME); + GetSettingsResponse settingsResponse = client().admin() + .indices() + .getSettings(new GetSettingsRequest().indices(RESTORED_INDEX_NAME).includeDefaults(true)) + .get(); + assertEquals(settingsResponse.getSetting(RESTORED_INDEX_NAME, SETTING_REPLICATION_TYPE), ReplicationType.SEGMENT.toString()); + + // Delete indices assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); assertFalse("index [" + INDEX_NAME + "] should have been deleted", indexExists(INDEX_NAME)); + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(RESTORED_INDEX_NAME)).get()); + assertFalse("index [" + RESTORED_INDEX_NAME + "] should have been deleted", indexExists(RESTORED_INDEX_NAME)); // Start new set of nodes with cluster level replication type setting and restrict replication type setting. Settings settings = Settings.builder() @@ -361,7 +377,25 @@ public void testSnapshotRestoreOnRestrictReplicationSetting() throws Exception { // Perform snapshot restore logger.info("--> Performing snapshot restore to target index"); - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> restoreSnapshotWithSettings(null)); + restoreSnapshotResponse = restoreSnapshotWithSettings(null); + + // Assertions + assertEquals(restoreSnapshotResponse.status(), RestStatus.ACCEPTED); + ensureGreen(RESTORED_INDEX_NAME); + settingsResponse = client().admin() + .indices() + .getSettings(new GetSettingsRequest().indices(RESTORED_INDEX_NAME).includeDefaults(true)) + .get(); + assertEquals(settingsResponse.getSetting(RESTORED_INDEX_NAME, SETTING_REPLICATION_TYPE), ReplicationType.SEGMENT.toString()); + + // Restore under a new name, explicitly passing the SEGMENT replication setting (matching the cluster-level setting) + restoreSnapshotWithSettings(restoreIndexSegRepSettings(), RESTORED_INDEX_NAME + "1"); + + // Restore under another name with DOCUMENT replication type; this mismatches the cluster-level restriction and must fail + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> restoreSnapshotWithSettings(restoreIndexDocRepSettings(), RESTORED_INDEX_NAME + "2") + ); assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getMessage()); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java index ca4c16935c2b9..cb41325c18a22 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java @@ -48,6 +48,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; @@
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java
index ca4c16935c2b9..cb41325c18a22 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java
@@ -48,6 +48,7 @@
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.inject.Inject;
+import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -57,6 +58,9 @@
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.shard.DocsStats;
 import org.opensearch.index.store.StoreStats;
+import org.opensearch.node.remotestore.RemoteStoreNodeService;
+import org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode;
+import org.opensearch.node.remotestore.RemoteStoreNodeService.Direction;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.TransportService;
@@ -67,6 +71,7 @@
 import java.util.function.IntFunction;
 
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
+import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED;
 
 /**
  * Main class to initiate resizing (shrink / split) an index into a new index
@@ -140,8 +145,8 @@ protected void clusterManagerOperation(
         // there is no need to fetch docs stats for split but we keep it simple and do it anyway for simplicity of the code
         final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getSourceIndex());
         final String targetIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getTargetIndexRequest().index());
-
         IndexMetadata indexMetadata = state.metadata().index(sourceIndex);
+        ClusterSettings clusterSettings = clusterService.getClusterSettings();
         if (resizeRequest.getResizeType().equals(ResizeType.SHRINK)
             && state.metadata().isSegmentReplicationEnabled(sourceIndex)
             && indexMetadata != null
@@ -161,7 +166,7 @@ protected void clusterManagerOperation(
             CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(resizeRequest, state, i -> {
                 IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i);
                 return shard == null ? null : shard.getPrimary().getDocs();
-            }, indicesStatsResponse.getPrimaries().store, sourceIndex, targetIndex);
+            }, indicesStatsResponse.getPrimaries().store, clusterSettings, sourceIndex, targetIndex);
 
             if (indicesStatsResponse.getIndex(sourceIndex)
                 .getTotal()
@@ -200,7 +205,7 @@ protected void clusterManagerOperation(
             CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(resizeRequest, state, i -> {
                 IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i);
                 return shard == null ? null : shard.getPrimary().getDocs();
-            }, indicesStatsResponse.getPrimaries().store, sourceIndex, targetIndex);
+            }, indicesStatsResponse.getPrimaries().store, clusterSettings, sourceIndex, targetIndex);
 
             createIndexService.createIndex(
                 updateRequest,
                 ActionListener.map(
@@ -223,6 +228,7 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(
         final ClusterState state,
         final IntFunction<DocsStats> perShardDocStats,
         final StoreStats primaryShardsStoreStats,
+        final ClusterSettings clusterSettings,
         String sourceIndexName,
         String targetIndexName
     ) {
@@ -231,6 +237,7 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(
         if (metadata == null) {
             throw new IndexNotFoundException(sourceIndexName);
         }
+        validateRemoteMigrationModeSettings(resizeRequest.getResizeType(), metadata, clusterSettings);
         final Settings.Builder targetIndexSettingsBuilder = Settings.builder()
             .put(targetIndex.settings())
             .normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX);
@@ -368,4 +375,39 @@ protected static int calculateTargetIndexShardsNum(
     protected String getClusterManagerActionName(DiscoveryNode node) {
         return super.getClusterManagerActionName(node);
     }
+
+    /**
+     * Rejects a resize request when the cluster compatibility mode is [Mixed] and the source index does not match
+     * the migration direction: direction [RemoteStore] requires the index to be remote-store-enabled, while
+     * direction [DocRep] requires it not to be.
+     * @param type resize type
+     * @param sourceIndexMetadata source index's metadata
+     * @param clusterSettings cluster settings
+     * @throws IllegalStateException if the source index's SETTING_REMOTE_STORE_ENABLED disagrees with the migration
+     *                               direction, e.g. the direction is [RemoteStore] but the setting is false, or the
+     *                               direction is [DocRep] but the setting is true.
+     */
+    private static void validateRemoteMigrationModeSettings(
+        final ResizeType type,
+        IndexMetadata sourceIndexMetadata,
+        ClusterSettings clusterSettings
+    ) {
+        CompatibilityMode compatibilityMode = clusterSettings.get(RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING);
+        if (compatibilityMode == CompatibilityMode.MIXED) {
+            boolean isRemoteStoreEnabled = sourceIndexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false);
+            Direction migrationDirection = clusterSettings.get(RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING);
+            boolean invalidConfiguration = (migrationDirection == Direction.REMOTE_STORE && isRemoteStoreEnabled == false)
+                || (migrationDirection == Direction.DOCREP && isRemoteStoreEnabled);
+            if (invalidConfiguration) {
+                throw new IllegalStateException(
+                    "Index "
+                        + type
+                        + " is not allowed as remote migration mode is mixed"
+                        + " and index is remote store "
+                        + (isRemoteStoreEnabled ? "enabled" : "disabled")
+                );
+            }
+        }
+    }
 }
diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java
index 3fadfe5f2cd6a..f705a218fb8e2 100644
--- a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java
+++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java
@@ -371,7 +371,7 @@ public void parse(
                     }
                     IndexRequest upsertRequest = updateRequest.upsertRequest();
                     if (upsertRequest != null) {
-                        upsertRequest.setPipeline(defaultPipeline);
+                        upsertRequest.setPipeline(pipeline);
                     }
 
                     updateRequestConsumer.accept(updateRequest);
diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java
index 9dac827e7d518..4a547ee2c82bd 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java
@@ -363,6 +363,21 @@ public SearchRequestBuilder addScriptField(String name, Script script) {
         return this;
     }
 
+    /**
+     * Adds a derived field of a given type. The provided script is used to derive the value
+     * of that type. Thereafter, it can be treated as a regular field of the given type when
+     * running queries.
+     *
+     * @param name   The name of the field to be used in various parts of the query. The name also represents
+     *               the field value in the returned hit.
+     * @param type   The type of the derived field. All values emitted by the script must be of this type.
+     * @param script The script to use
+     */
+    public SearchRequestBuilder addDerivedField(String name, String type, Script script) {
+        sourceBuilder().derivedField(name, type, script);
+        return this;
+    }
+
     /**
      * Adds a sort against the given field name and the sort ordering.
      *
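The new builder method wires derived fields into the search source. A small usage sketch; the index name, source fields, and the emit-style script body are assumptions for illustration, not part of this change:

    // Derive a keyword field at query time and use it like an indexed field (sketch).
    SearchResponse resp = client().prepareSearch("logs")
        .addDerivedField(
            "full_name",
            "keyword",
            new Script("emit(doc['first_name'].value + ' ' + doc['last_name'].value)")
        )
        .get();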
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
index dd4a1fe9a5b95..a79f7851b801e 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
@@ -53,6 +53,7 @@
 import org.opensearch.cluster.block.ClusterBlock;
 import org.opensearch.cluster.block.ClusterBlockLevel;
 import org.opensearch.cluster.block.ClusterBlocks;
+import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.node.DiscoveryNodes;
 import org.opensearch.cluster.routing.IndexRoutingTable;
 import org.opensearch.cluster.routing.RoutingTable;
@@ -97,8 +98,8 @@
 import org.opensearch.indices.ShardLimitValidator;
 import org.opensearch.indices.SystemIndices;
 import org.opensearch.indices.replication.common.ReplicationType;
-import org.opensearch.node.Node;
 import org.opensearch.node.remotestore.RemoteStoreNodeAttribute;
+import org.opensearch.node.remotestore.RemoteStoreNodeService;
 import org.opensearch.threadpool.ThreadPool;
 
 import java.io.IOException;
@@ -140,6 +141,8 @@
 import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING;
 import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteDataAttributePresent;
+import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING;
+import static org.opensearch.node.remotestore.RemoteStoreNodeService.isMigratingToRemoteStore;
 
 /**
  * Service responsible for submitting create index requests
@@ -910,8 +913,8 @@ static Settings aggregateIndexSettings(
         indexSettingsBuilder.put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, request.getProvidedName());
         indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID());
 
-        updateReplicationStrategy(indexSettingsBuilder, request.settings(), settings, combinedTemplateSettings);
-        updateRemoteStoreSettings(indexSettingsBuilder, settings);
+        updateReplicationStrategy(indexSettingsBuilder, request.settings(), settings, combinedTemplateSettings, clusterSettings);
+        updateRemoteStoreSettings(indexSettingsBuilder, currentState, clusterSettings, settings, request.index());
 
         if (sourceMetadata != null) {
             assert request.resizeType() != null;
@@ -959,29 +962,34 @@ static Settings aggregateIndexSettings(
      * @param clusterSettings cluster level settings
      * @param combinedTemplateSettings combined template settings which satisfy the index
      */
-    private static void updateReplicationStrategy(
+    public static void updateReplicationStrategy(
         Settings.Builder settingsBuilder,
         Settings requestSettings,
-        Settings clusterSettings,
-        Settings combinedTemplateSettings
+        Settings nodeSettings,
+        Settings combinedTemplateSettings,
+        ClusterSettings clusterSettings
    ) {
         // The replication setting is applied in the following order:
-        // 1. Explicit index creation request parameter
-        // 2. Template property for replication type
-        // 3. Defaults to segment if remote store attributes on the cluster
-        // 4. Default cluster level setting
+        // 1. Strictly SEGMENT if the cluster is undergoing remote store migration
+        // 2. Explicit index creation request parameter
+        // 3. Template property for replication type
+        // 4. Replication type according to cluster level settings
+        // 5. Defaults to segment if remote store attributes are present on the cluster
+        // 6. Default cluster level setting
         final ReplicationType indexReplicationType;
-        if (INDEX_REPLICATION_TYPE_SETTING.exists(requestSettings)) {
+        if (isMigratingToRemoteStore(clusterSettings)) {
+            indexReplicationType = ReplicationType.SEGMENT;
+        } else if (INDEX_REPLICATION_TYPE_SETTING.exists(requestSettings)) {
             indexReplicationType = INDEX_REPLICATION_TYPE_SETTING.get(requestSettings);
-        } else if (INDEX_REPLICATION_TYPE_SETTING.exists(combinedTemplateSettings)) {
+        } else if (combinedTemplateSettings != null && INDEX_REPLICATION_TYPE_SETTING.exists(combinedTemplateSettings)) {
             indexReplicationType = INDEX_REPLICATION_TYPE_SETTING.get(combinedTemplateSettings);
-        } else if (CLUSTER_REPLICATION_TYPE_SETTING.exists(clusterSettings)) {
-            indexReplicationType = CLUSTER_REPLICATION_TYPE_SETTING.get(clusterSettings);
-        } else if (isRemoteDataAttributePresent(clusterSettings)) {
+        } else if (CLUSTER_REPLICATION_TYPE_SETTING.exists(nodeSettings)) {
+            indexReplicationType = CLUSTER_REPLICATION_TYPE_SETTING.get(nodeSettings);
+        } else if (isRemoteDataAttributePresent(nodeSettings)) {
             indexReplicationType = ReplicationType.SEGMENT;
         } else {
-            indexReplicationType = CLUSTER_REPLICATION_TYPE_SETTING.getDefault(clusterSettings);
+            indexReplicationType = CLUSTER_REPLICATION_TYPE_SETTING.getDefault(nodeSettings);
         }
         settingsBuilder.put(SETTING_REPLICATION_TYPE, indexReplicationType);
     }
@@ -989,23 +997,49 @@ private static void updateReplicationStrategy(
     /**
      * Updates index settings to enable remote store by default based on node attributes
      * @param settingsBuilder index settings builder to be updated with relevant settings
+     * @param clusterState state of cluster
      * @param clusterSettings cluster level settings
+     * @param nodeSettings node level settings
+     * @param indexName name of index
     */
-    private static void updateRemoteStoreSettings(Settings.Builder settingsBuilder, Settings clusterSettings) {
-        if (isRemoteDataAttributePresent(clusterSettings)) {
-            settingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, true)
-                .put(
-                    SETTING_REMOTE_SEGMENT_STORE_REPOSITORY,
-                    clusterSettings.get(
-                        Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY
-                    )
-                )
-                .put(
-                    SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY,
-                    clusterSettings.get(
-                        Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY
-                    )
-                );
+    public static void updateRemoteStoreSettings(
+        Settings.Builder settingsBuilder,
+        ClusterState clusterState,
+        ClusterSettings clusterSettings,
+        Settings nodeSettings,
+        String indexName
+    ) {
+        if ((isRemoteDataAttributePresent(nodeSettings)
+            && clusterSettings.get(REMOTE_STORE_COMPATIBILITY_MODE_SETTING).equals(RemoteStoreNodeService.CompatibilityMode.STRICT))
+            || isMigratingToRemoteStore(clusterSettings)) {
+            String segmentRepo, translogRepo;
+
+            Optional<DiscoveryNode> remoteNode = clusterState.nodes()
+                .getNodes()
+                .values()
+                .stream()
+                .filter(DiscoveryNode::isRemoteStoreNode)
+                .findFirst();
+
+            if (remoteNode.isPresent()) {
+                translogRepo = remoteNode.get()
+                    .getAttributes()
+                    .get(RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY);
+                segmentRepo = remoteNode.get()
+                    .getAttributes()
+                    .get(RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY);
+                if (segmentRepo != null && translogRepo != null) {
+                    settingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, true)
+                        .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, segmentRepo)
+                        .put(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, translogRepo);
+                } else {
+                    ValidationException validationException = new ValidationException();
+                    validationException.addValidationErrors(
+                        Collections.singletonList("Cluster is migrating to remote store but no remote node found, failing index creation")
+                    );
+                    throw new IndexCreationException(indexName, validationException);
+                }
+            }
         }
     }
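Taken together, the precedence rules mean a request-level replication choice is overridden while a migration is in flight. A sketch of the observable behavior, assuming a cluster already in mixed-mode remote-store migration and taking "index.replication.type" as the key behind SETTING_REPLICATION_TYPE:

    // During remote store migration, a DOCUMENT request parameter is ignored (sketch).
    client().admin().indices().prepareCreate("new-index")
        .setSettings(Settings.builder().put("index.replication.type", "DOCUMENT"))
        .get();
    String replication = client().admin().indices()
        .prepareGetSettings("new-index")
        .get()
        .getSetting("new-index", "index.replication.type");
    assertEquals("SEGMENT", replication); // forced by isMigratingToRemoteStore(...)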
diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java
index 36149d014ea84..2c250f6a5d86e 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java
@@ -34,7 +34,6 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-import org.opensearch.cluster.metadata.WeightedRoutingMetadata;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.node.DiscoveryNodes;
 import org.opensearch.common.Nullable;
@@ -63,7 +62,6 @@
 import java.util.Set;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
-import java.util.stream.Stream;
 
 import static java.util.Collections.emptyMap;
 
@@ -96,8 +94,8 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
     private volatile Map<AttributesKey, AttributesRoutings> initializingShardsByAttributes = emptyMap();
     private final Object shardsByAttributeMutex = new Object();
     private final Object shardsByWeightMutex = new Object();
-    private volatile Map<WeightedRoutingKey, List<ShardRouting>> activeShardsByWeight = emptyMap();
-    private volatile Map<WeightedRoutingKey, List<ShardRouting>> initializingShardsByWeight = emptyMap();
+    private volatile Map<WeightedRoutingKey, WeightedShardRoutings> activeShardsByWeight = emptyMap();
+    private volatile Map<WeightedRoutingKey, WeightedShardRoutings> initializingShardsByWeight = emptyMap();
 
     private static final Logger logger = LogManager.getLogger(IndexShardRoutingTable.class);
 
@@ -249,7 +247,7 @@ public List<ShardRouting> assignedShards() {
         return this.assignedShards;
     }
 
-    public Map<WeightedRoutingKey, List<ShardRouting>> getActiveShardsByWeight() {
+    public Map<WeightedRoutingKey, WeightedShardRoutings> getActiveShardsByWeight() {
         return activeShardsByWeight;
     }
 
@@ -338,23 +336,7 @@ public ShardIterator activeInitializingShardsWeightedIt(
         // append shards for attribute value with weight zero, so that shard search requests can be tried on
         // shard copies in case of request failure from other attribute values.
         if (isFailOpenEnabled) {
-            try {
-                Stream<String> keys = weightedRouting.weights()
-                    .entrySet()
-                    .stream()
-                    .filter(entry -> entry.getValue().intValue() == WeightedRoutingMetadata.WEIGHED_AWAY_WEIGHT)
-                    .map(Map.Entry::getKey);
-                keys.forEach(key -> {
-                    ShardIterator iterator = onlyNodeSelectorActiveInitializingShardsIt(weightedRouting.attributeName() + ":" + key, nodes);
-                    while (iterator.remaining() > 0) {
-                        ordered.add(iterator.nextOrNull());
-                    }
-                });
-            } catch (IllegalArgumentException e) {
-                // this exception is thrown by {@link onlyNodeSelectorActiveInitializingShardsIt} in case count of shard
-                // copies found is zero
-                logger.debug("no shard copies found for shard id [{}] for node attribute with weight zero", shardId);
-            }
+            ordered.addAll(activeInitializingShardsWithoutWeights(weightedRouting, nodes, defaultWeight));
         }
 
         return new PlainShardIterator(shardId, ordered);
@@ -378,6 +360,18 @@ private List<ShardRouting> activeInitializingShardsWithWeights(
         return orderedListWithDistinctShards;
     }
 
+    private List<ShardRouting> activeInitializingShardsWithoutWeights(
+        WeightedRouting weightedRouting,
+        DiscoveryNodes nodes,
+        double defaultWeight
+    ) {
+        List<ShardRouting> ordered = new ArrayList<>(getActiveShardsWithoutWeight(weightedRouting, nodes, defaultWeight));
+        if (!allInitializingShards.isEmpty()) {
+            ordered.addAll(getInitializingShardsWithoutWeight(weightedRouting, nodes, defaultWeight));
+        }
+        return ordered.stream().distinct().collect(Collectors.toList());
+    }
+
     /**
      * Returns a list containing shard routings ordered using weighted round-robin scheduling.
     */
@@ -949,20 +943,60 @@ public int hashCode() {
         }
     }
 
+    /**
+     * Holder class for shard routing(s) which are classified and stored based on their weights.
+     *
+     * @opensearch.api
+     */
+    @PublicApi(since = "2.14.0")
+    public static class WeightedShardRoutings {
+        private final List<ShardRouting> shardRoutingsWithWeight;
+        private final List<ShardRouting> shardRoutingWithoutWeight;
+
+        public WeightedShardRoutings(List<ShardRouting> shardRoutingsWithWeight, List<ShardRouting> shardRoutingWithoutWeight) {
+            this.shardRoutingsWithWeight = Collections.unmodifiableList(shardRoutingsWithWeight);
+            this.shardRoutingWithoutWeight = Collections.unmodifiableList(shardRoutingWithoutWeight);
+        }
+
+        public List<ShardRouting> getShardRoutingsWithWeight() {
+            return shardRoutingsWithWeight;
+        }
+
+        public List<ShardRouting> getShardRoutingWithoutWeight() {
+            return shardRoutingWithoutWeight;
+        }
+    }
+
     /**
      * Gets the active shard routings from memory if available; otherwise calculates them and puts them in memory.
     */
     private List<ShardRouting> getActiveShardsByWeight(WeightedRouting weightedRouting, DiscoveryNodes nodes, double defaultWeight) {
         WeightedRoutingKey key = new WeightedRoutingKey(weightedRouting);
-        List<ShardRouting> shardRoutings = activeShardsByWeight.get(key);
-        if (shardRoutings == null) {
-            synchronized (shardsByWeightMutex) {
-                shardRoutings = shardsOrderedByWeight(activeShards, weightedRouting, nodes, defaultWeight);
-                activeShardsByWeight = new MapBuilder<WeightedRoutingKey, List<ShardRouting>>().put(key, shardRoutings).immutableMap();
-            }
+        if (activeShardsByWeight.get(key) == null) {
+            populateActiveShardWeightsMap(weightedRouting, nodes, defaultWeight);
+        }
+        return activeShardsByWeight.get(key).getShardRoutingsWithWeight();
+    }
+
+    private List<ShardRouting> getActiveShardsWithoutWeight(WeightedRouting weightedRouting, DiscoveryNodes nodes, double defaultWeight) {
+        WeightedRoutingKey key = new WeightedRoutingKey(weightedRouting);
+        if (activeShardsByWeight.get(key) == null) {
+            populateActiveShardWeightsMap(weightedRouting, nodes, defaultWeight);
+        }
+        return activeShardsByWeight.get(key).getShardRoutingWithoutWeight();
+    }
+
+    private void populateActiveShardWeightsMap(WeightedRouting weightedRouting, DiscoveryNodes nodes, double defaultWeight) {
+        WeightedRoutingKey key = new WeightedRoutingKey(weightedRouting);
+        List<ShardRouting> weightedRoutings = shardsOrderedByWeight(activeShards, weightedRouting, nodes, defaultWeight);
+        List<ShardRouting> nonWeightedRoutings = activeShards.stream()
+            .filter(shard -> !weightedRoutings.contains(shard))
+            .collect(Collectors.toUnmodifiableList());
+        synchronized (shardsByWeightMutex) {
+            activeShardsByWeight = new MapBuilder<WeightedRoutingKey, WeightedShardRoutings>().put(
+                key,
+                new WeightedShardRoutings(weightedRoutings, nonWeightedRoutings)
+            ).immutableMap();
         }
-        return shardRoutings;
     }
 
     /**
@@ -971,14 +1005,34 @@ private List<ShardRouting> getActiveShardsByWeight(WeightedRouting weightedRouting, DiscoveryNodes nodes, double defaultWeight) {
     */
     private List<ShardRouting> getInitializingShardsByWeight(WeightedRouting weightedRouting, DiscoveryNodes nodes, double defaultWeight) {
         WeightedRoutingKey key = new WeightedRoutingKey(weightedRouting);
-        List<ShardRouting> shardRoutings = initializingShardsByWeight.get(key);
-        if (shardRoutings == null) {
-            synchronized (shardsByWeightMutex) {
-                shardRoutings = shardsOrderedByWeight(activeShards, weightedRouting, nodes, defaultWeight);
-                initializingShardsByWeight = new MapBuilder<WeightedRoutingKey, List<ShardRouting>>().put(key, shardRoutings).immutableMap();
-            }
+        if (initializingShardsByWeight.get(key) == null) {
+            populateInitializingShardWeightsMap(weightedRouting, nodes, defaultWeight);
+        }
+        return initializingShardsByWeight.get(key).getShardRoutingsWithWeight();
+    }
+
+    private List<ShardRouting> getInitializingShardsWithoutWeight(
+        WeightedRouting weightedRouting,
+        DiscoveryNodes nodes,
+        double defaultWeight
+    ) {
+        WeightedRoutingKey key = new WeightedRoutingKey(weightedRouting);
+        if (initializingShardsByWeight.get(key) == null) {
+            populateInitializingShardWeightsMap(weightedRouting, nodes, defaultWeight);
+        }
+        return initializingShardsByWeight.get(key).getShardRoutingWithoutWeight();
+    }
+
+    private void populateInitializingShardWeightsMap(WeightedRouting weightedRouting, DiscoveryNodes nodes, double defaultWeight) {
+        WeightedRoutingKey key = new WeightedRoutingKey(weightedRouting);
+        List<ShardRouting> weightedRoutings = shardsOrderedByWeight(allInitializingShards, weightedRouting, nodes, defaultWeight);
+        List<ShardRouting> nonWeightedRoutings = allInitializingShards.stream()
+            .filter(shard -> !weightedRoutings.contains(shard))
+            .collect(Collectors.toUnmodifiableList());
+        synchronized (shardsByWeightMutex) {
+            initializingShardsByWeight = new MapBuilder<WeightedRoutingKey, WeightedShardRoutings>().put(
+                key,
+                new WeightedShardRoutings(weightedRoutings, nonWeightedRoutings)
+            ).immutableMap();
         }
-        return shardRoutings;
     }
 
     /**
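The cache now memoizes both partitions in a single entry, so the fail-open path no longer rebuilds node-selector iterators per weighed-away attribute value. Conceptually (a fragment from inside IndexShardRoutingTable, not standalone code):

    // One lookup yields both the WRR-ordered weighted copies and the weighed-away pool (sketch).
    WeightedShardRoutings entry = activeShardsByWeight.get(new WeightedRoutingKey(weightedRouting));
    List<ShardRouting> weighted = entry.getShardRoutingsWithWeight();       // weight > 0
    List<ShardRouting> weighedAway = entry.getShardRoutingWithoutWeight();  // weight == 0, used on fail-open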
diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java
index 468fac08d2946..6f0e4fe90cfff 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java
@@ -14,6 +14,7 @@
 import org.opensearch.core.common.io.stream.Writeable;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
@@ -25,27 +26,26 @@
 */
 @PublicApi(since = "2.4.0")
 public class WeightedRouting implements Writeable {
-    private String attributeName;
-    private Map<String, Double> weights;
+    private final String attributeName;
+    private final Map<String, Double> weights;
+    private final int hashCode;
 
     public WeightedRouting() {
-        this.attributeName = "";
-        this.weights = new HashMap<>(3);
+        this("", new HashMap<>(3));
     }
 
     public WeightedRouting(String attributeName, Map<String, Double> weights) {
         this.attributeName = attributeName;
-        this.weights = weights;
+        this.weights = Collections.unmodifiableMap(weights);
+        this.hashCode = Objects.hash(this.attributeName, this.weights);
     }
 
     public WeightedRouting(WeightedRouting weightedRouting) {
-        this.attributeName = weightedRouting.attributeName();
-        this.weights = weightedRouting.weights;
+        this(weightedRouting.attributeName(), weightedRouting.weights);
     }
 
     public WeightedRouting(StreamInput in) throws IOException {
-        attributeName = in.readString();
-        weights = (Map<String, Double>) in.readGenericValue();
+        this(in.readString(), (Map<String, Double>) in.readGenericValue());
     }
 
     public boolean isSet() {
@@ -70,7 +70,7 @@ public boolean equals(Object o) {
 
     @Override
     public int hashCode() {
-        return Objects.hash(attributeName, weights);
+        return hashCode;
     }
 
     @Override
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java
index 5375910c57579..6702db4b43e91 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java
@@ -30,9 +30,9 @@ public class AllocationConstraints {
 
     public AllocationConstraints() {
         this.constraints = new HashMap<>();
-        this.constraints.putIfAbsent(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, new Constraint(isIndexShardsPerNodeBreached()));
-        this.constraints.putIfAbsent(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached()));
-        this.constraints.putIfAbsent(CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPrimaryShardsPerNodeBreached()));
+        this.constraints.put(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, new Constraint(isIndexShardsPerNodeBreached()));
+        this.constraints.put(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached()));
+        this.constraints.put(CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPrimaryShardsPerNodeBreached(0.0f)));
     }
 
     public void updateAllocationConstraint(String constraint, boolean enable) {
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java
index ae2d4a0926194..08fe8f92d1f80 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java
@@ -28,6 +28,11 @@ public class ConstraintTypes {
     */
     public final static String CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID = "cluster.primary.shard.balance.constraint";
 
+    /**
+     * Defines a cluster constraint which is breached when a node contains more than the average number of primary shards across all indices
+     */
+    public final static String CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID = "cluster.primary.shard.rebalance.constraint";
+
     /**
      * Defines an index constraint which is breached when a node contains more than avg number of shards for an index
     */
@@ -70,14 +75,14 @@ public static Predicate<Constraint.ConstraintParams> isPerIndexPrimaryShardsPerNodeBreached() {
     }
 
     /**
-     * Defines a predicate which returns true when a node contains more than average number of primary shards. This
-     * constraint is used in weight calculation during allocation only. When breached a high weight {@link ConstraintTypes#CONSTRAINT_WEIGHT}
-     * is assigned to node resulting in lesser chances of node being selected as allocation target
+     * Defines a predicate which returns true when a node contains more than the average number of primary shards plus an
+     * added buffer. This constraint is used in weight calculation during both allocation and rebalancing. When breached,
+     * a high weight {@link ConstraintTypes#CONSTRAINT_WEIGHT} is assigned to the node, reducing the chance of the node
+     * being selected as an allocation or rebalance target.
     */
-    public static Predicate<Constraint.ConstraintParams> isPrimaryShardsPerNodeBreached() {
+    public static Predicate<Constraint.ConstraintParams> isPrimaryShardsPerNodeBreached(float buffer) {
         return (params) -> {
             int primaryShardCount = params.getNode().numPrimaryShards();
-            int allowedPrimaryShardCount = (int) Math.ceil(params.getBalancer().avgPrimaryShardsPerNode());
+            int allowedPrimaryShardCount = (int) Math.ceil(params.getBalancer().avgPrimaryShardsPerNode() * (1 + buffer));
             return primaryShardCount >= allowedPrimaryShardCount;
         };
     }
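A quick arithmetic check of the buffered threshold: with 55 primaries spread over 5 nodes the average is 11, so the default 0.10 buffer tolerates up to ceil(11 * 1.10) = 13 primaries before a node counts as breaching the constraint.

    // Worked example of the buffered threshold used above.
    double avgPrimaryShardsPerNode = 55 / 5.0;                            // 11.0
    int allowed = (int) Math.ceil(avgPrimaryShardsPerNode * (1 + 0.10f)); // ceil(12.1) = 13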
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java
index a4036ec47ec0e..2c2138af18abc 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java
@@ -14,8 +14,10 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID;
 import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID;
 import static org.opensearch.cluster.routing.allocation.ConstraintTypes.isPerIndexPrimaryShardsPerNodeBreached;
+import static org.opensearch.cluster.routing.allocation.ConstraintTypes.isPrimaryShardsPerNodeBreached;
 
 /**
  * Constraints applied during rebalancing round; specify conditions which, if breached, reduce the
@@ -27,9 +29,13 @@ public class RebalanceConstraints {
 
     private Map<String, Constraint> constraints;
 
-    public RebalanceConstraints() {
+    public RebalanceConstraints(RebalanceParameter rebalanceParameter) {
         this.constraints = new HashMap<>();
-        this.constraints.putIfAbsent(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached()));
+        this.constraints.put(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached()));
+        this.constraints.put(
+            CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID,
+            new Constraint(isPrimaryShardsPerNodeBreached(rebalanceParameter.getPreferPrimaryBalanceBuffer()))
+        );
     }
 
     public void updateRebalanceConstraint(String constraint, boolean enable) {
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceParameter.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceParameter.java
new file mode 100644
index 0000000000000..35fbaede93ba3
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceParameter.java
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.routing.allocation;
+
+/**
+ * Parameters used by the rebalance constraints
+ */
+public class RebalanceParameter {
+    private float preferPrimaryBalanceBuffer;
+
+    public RebalanceParameter(float preferPrimaryBalanceBuffer) {
+        this.preferPrimaryBalanceBuffer = preferPrimaryBalanceBuffer;
+    }
+
+    public float getPreferPrimaryBalanceBuffer() {
+        return preferPrimaryBalanceBuffer;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
index 41ace0e7661fe..b2443490dd973 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
@@ -46,6 +46,7 @@
 import org.opensearch.cluster.routing.allocation.ConstraintTypes;
 import org.opensearch.cluster.routing.allocation.MoveDecision;
 import org.opensearch.cluster.routing.allocation.RebalanceConstraints;
+import org.opensearch.cluster.routing.allocation.RebalanceParameter;
 import org.opensearch.cluster.routing.allocation.RoutingAllocation;
 import org.opensearch.cluster.routing.allocation.ShardAllocationDecision;
 import org.opensearch.common.inject.Inject;
@@ -61,6 +62,7 @@
 import java.util.Set;
 
 import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID;
+import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID;
 import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID;
 import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID;
@@ -145,10 +147,29 @@ public class BalancedShardsAllocator implements ShardsAllocator {
         Property.NodeScope
     );
 
+    public static final Setting<Boolean> PREFER_PRIMARY_SHARD_REBALANCE = Setting.boolSetting(
+        "cluster.routing.allocation.rebalance.primary.enable",
+        false,
+        Property.Dynamic,
+        Property.NodeScope
+    );
+
+    public static final Setting<Float> PRIMARY_SHARD_REBALANCE_BUFFER = Setting.floatSetting(
+        "cluster.routing.allocation.rebalance.primary.buffer",
+        0.10f,
+        0.0f,
+        Property.Dynamic,
+        Property.NodeScope
+    );
+
     private volatile boolean movePrimaryFirst;
     private volatile ShardMovementStrategy shardMovementStrategy;
 
     private volatile boolean preferPrimaryShardBalance;
+    private volatile boolean preferPrimaryShardRebalance;
+    private volatile float preferPrimaryShardRebalanceBuffer;
+    private volatile float indexBalanceFactor;
+    private volatile float shardBalanceFactor;
     private volatile WeightFunction weightFunction;
     private volatile float threshold;
 
@@ -158,14 +179,21 @@ public BalancedShardsAllocator(Settings settings) {
 
     @Inject
     public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSettings) {
-        setWeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings));
+        setShardBalanceFactor(SHARD_BALANCE_FACTOR_SETTING.get(settings));
+        setIndexBalanceFactor(INDEX_BALANCE_FACTOR_SETTING.get(settings));
+        setPreferPrimaryShardRebalanceBuffer(PRIMARY_SHARD_REBALANCE_BUFFER.get(settings));
+        updateWeightFunction();
         setThreshold(THRESHOLD_SETTING.get(settings));
         setPreferPrimaryShardBalance(PREFER_PRIMARY_SHARD_BALANCE.get(settings));
+        setPreferPrimaryShardRebalance(PREFER_PRIMARY_SHARD_REBALANCE.get(settings));
         setShardMovementStrategy(SHARD_MOVEMENT_STRATEGY_SETTING.get(settings));
         clusterSettings.addSettingsUpdateConsumer(PREFER_PRIMARY_SHARD_BALANCE, this::setPreferPrimaryShardBalance);
         clusterSettings.addSettingsUpdateConsumer(SHARD_MOVE_PRIMARY_FIRST_SETTING, this::setMovePrimaryFirst);
         clusterSettings.addSettingsUpdateConsumer(SHARD_MOVEMENT_STRATEGY_SETTING, this::setShardMovementStrategy);
-        clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, this::setWeightFunction);
+        clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, this::updateIndexBalanceFactor);
+        clusterSettings.addSettingsUpdateConsumer(SHARD_BALANCE_FACTOR_SETTING, this::updateShardBalanceFactor);
+        clusterSettings.addSettingsUpdateConsumer(PRIMARY_SHARD_REBALANCE_BUFFER, this::updatePreferPrimaryShardBalanceBuffer);
+        clusterSettings.addSettingsUpdateConsumer(PREFER_PRIMARY_SHARD_REBALANCE, this::setPreferPrimaryShardRebalance);
         clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold);
     }
 
@@ -190,8 +218,35 @@ private void setShardMovementStrategy(ShardMovementStrategy shardMovementStrategy) {
         }
     }
 
-    private void setWeightFunction(float indexBalance, float shardBalanceFactor) {
-        weightFunction = new WeightFunction(indexBalance, shardBalanceFactor);
+    private void setIndexBalanceFactor(float indexBalanceFactor) {
+        this.indexBalanceFactor = indexBalanceFactor;
+    }
+
+    private void setShardBalanceFactor(float shardBalanceFactor) {
+        this.shardBalanceFactor = shardBalanceFactor;
+    }
+
+    private void setPreferPrimaryShardRebalanceBuffer(float preferPrimaryShardRebalanceBuffer) {
+        this.preferPrimaryShardRebalanceBuffer = preferPrimaryShardRebalanceBuffer;
+    }
+
+    private void updateIndexBalanceFactor(float indexBalanceFactor) {
+        this.indexBalanceFactor = indexBalanceFactor;
+        updateWeightFunction();
+    }
+
+    private void updateShardBalanceFactor(float shardBalanceFactor) {
+        this.shardBalanceFactor = shardBalanceFactor;
+        updateWeightFunction();
+    }
+
+    private void updatePreferPrimaryShardBalanceBuffer(float preferPrimaryShardBalanceBuffer) {
+        this.preferPrimaryShardRebalanceBuffer = preferPrimaryShardBalanceBuffer;
+        updateWeightFunction();
+    }
+
+    private void updateWeightFunction() {
+        weightFunction = new WeightFunction(this.indexBalanceFactor, this.shardBalanceFactor, this.preferPrimaryShardRebalanceBuffer);
     }
 
     /**
@@ -205,6 +260,11 @@ private void setPreferPrimaryShardBalance(boolean preferPrimaryShardBalance) {
         this.weightFunction.updateRebalanceConstraint(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, preferPrimaryShardBalance);
     }
 
+    private void setPreferPrimaryShardRebalance(boolean preferPrimaryShardRebalance) {
+        this.preferPrimaryShardRebalance = preferPrimaryShardRebalance;
+        this.weightFunction.updateRebalanceConstraint(CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID, preferPrimaryShardRebalance);
+    }
+
     private void setThreshold(float threshold) {
         this.threshold = threshold;
     }
@@ -221,7 +281,8 @@ public void allocate(RoutingAllocation allocation) {
             shardMovementStrategy,
             weightFunction,
             threshold,
-            preferPrimaryShardBalance
+            preferPrimaryShardBalance,
+            preferPrimaryShardRebalance
         );
         localShardsBalancer.allocateUnassigned();
         localShardsBalancer.moveShards();
@@ -242,7 +303,8 @@ public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, f
             shardMovementStrategy,
             weightFunction,
             threshold,
-            preferPrimaryShardBalance
+            preferPrimaryShardBalance,
+            preferPrimaryShardRebalance
         );
         AllocateUnassignedDecision allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN;
         MoveDecision moveDecision = MoveDecision.NOT_TAKEN;
@@ -348,7 +410,7 @@ static class WeightFunction {
         private AllocationConstraints constraints;
         private RebalanceConstraints rebalanceConstraints;
 
-        WeightFunction(float indexBalance, float shardBalance) {
+        WeightFunction(float indexBalance, float shardBalance, float preferPrimaryBalanceBuffer) {
             float sum = indexBalance + shardBalance;
             if (sum <= 0.0f) {
                 throw new IllegalArgumentException("Balance factors must sum to a value > 0 but was: " + sum);
@@ -357,8 +419,9 @@ static class WeightFunction {
             theta1 = indexBalance / sum;
             this.indexBalance = indexBalance;
             this.shardBalance = shardBalance;
+            RebalanceParameter rebalanceParameter = new RebalanceParameter(preferPrimaryBalanceBuffer);
             this.constraints = new AllocationConstraints();
-            this.rebalanceConstraints = new RebalanceConstraints();
+            this.rebalanceConstraints = new RebalanceConstraints(rebalanceParameter);
             // Enable index shard per node breach constraint
             updateAllocationConstraint(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, true);
         }
@@ -495,7 +558,7 @@ public Balancer(
             float threshold,
             boolean preferPrimaryBalance
         ) {
-            super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance);
+            super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance, false);
         }
     }
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java
index 46edd86043ab2..696a83dd624a8 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java
@@ -61,6 +61,7 @@ public class LocalShardsBalancer extends ShardsBalancer {
 
     private final ShardMovementStrategy shardMovementStrategy;
     private final boolean preferPrimaryBalance;
+    private final boolean preferPrimaryRebalance;
     private final BalancedShardsAllocator.WeightFunction weight;
 
     private final float threshold;
@@ -76,7 +77,8 @@ public LocalShardsBalancer(
         ShardMovementStrategy shardMovementStrategy,
         BalancedShardsAllocator.WeightFunction weight,
         float threshold,
-        boolean preferPrimaryBalance
+        boolean preferPrimaryBalance,
+        boolean preferPrimaryRebalance
     ) {
         this.logger = logger;
         this.allocation = allocation;
@@ -91,6 +93,7 @@ public LocalShardsBalancer(
         sorter = newNodeSorter();
         inEligibleTargetNode = new HashSet<>();
         this.preferPrimaryBalance = preferPrimaryBalance;
+        this.preferPrimaryRebalance = preferPrimaryRebalance;
         this.shardMovementStrategy = shardMovementStrategy;
     }
 
@@ -995,13 +998,18 @@ private boolean tryRelocateShard(BalancedShardsAllocator.ModelNode minNode, Bala
                 continue;
             }
             // This is a safety net which prevents unnecessary primary shard relocations from maxNode to minNode when
-            // doing such relocation wouldn't help in primary balance.
+            // doing such a relocation wouldn't help in primary balance. The condition does not apply when node-level
+            // primary rebalancing is enabled.
             if (preferPrimaryBalance == true
+                && preferPrimaryRebalance == false
                 && shard.primary()
                 && maxNode.numPrimaryShards(shard.getIndexName()) - minNode.numPrimaryShards(shard.getIndexName()) < 2) {
                 continue;
             }
-
+            // Relax the above condition to the node level so that rebalancing can reach a global primary balance
+            if (preferPrimaryRebalance == true && shard.primary() && maxNode.numPrimaryShards() - minNode.numPrimaryShards() < 2) {
+                continue;
+            }
             final Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
             maxNode.removeShard(shard);
             long shardSize = allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
index af4b2c61a95b1..d3200c1bc9d75 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
@@ -38,11 +38,13 @@
 import org.opensearch.cluster.routing.RecoverySource;
 import org.opensearch.cluster.routing.RoutingNode;
 import org.opensearch.cluster.routing.ShardRouting;
+import org.opensearch.cluster.routing.UnassignedInfo;
 import org.opensearch.cluster.routing.allocation.RoutingAllocation;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Setting.Property;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.node.remotestore.RemoteStoreNodeService;
 
 import java.util.Map;
 
@@ -102,14 +104,32 @@ public class FilterAllocationDecider extends AllocationDecider {
     private volatile DiscoveryNodeFilters clusterRequireFilters;
     private volatile DiscoveryNodeFilters clusterIncludeFilters;
     private volatile DiscoveryNodeFilters clusterExcludeFilters;
+    private volatile RemoteStoreNodeService.Direction migrationDirection;
+    private volatile RemoteStoreNodeService.CompatibilityMode compatibilityMode;
 
     public FilterAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
         setClusterRequireFilters(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(settings));
         setClusterExcludeFilters(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getAsMap(settings));
         setClusterIncludeFilters(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.getAsMap(settings));
+        this.migrationDirection = RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING.get(settings);
+        this.compatibilityMode = RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(settings);
+
         clusterSettings.addAffixMapUpdateConsumer(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, this::setClusterRequireFilters, (a, b) -> {});
         clusterSettings.addAffixMapUpdateConsumer(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, this::setClusterExcludeFilters, (a, b) -> {});
         clusterSettings.addAffixMapUpdateConsumer(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, this::setClusterIncludeFilters, (a, b) -> {});
+        clusterSettings.addSettingsUpdateConsumer(RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, this::setMigrationDirection);
+        clusterSettings.addSettingsUpdateConsumer(
+            RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING,
+            this::setCompatibilityMode
+        );
+    }
+
+    private void setMigrationDirection(RemoteStoreNodeService.Direction migrationDirection) {
+        this.migrationDirection = migrationDirection;
+    }
+
+    private void setCompatibilityMode(RemoteStoreNodeService.CompatibilityMode compatibilityMode) {
+        this.compatibilityMode = compatibilityMode;
     }
 
     @Override
@@ -127,10 +147,28 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
                     "initial allocation of the shrunken index is only allowed on nodes [%s] that hold a copy of every shard in the index";
                 return allocation.decision(Decision.NO, NAME, explanation, initialRecoveryFilters);
             }
+
+            Decision decision = isRemoteStoreMigrationReplicaDecision(shardRouting, allocation);
+            if (decision != null) return decision;
         }
         return shouldFilter(shardRouting, node.node(), allocation);
     }
 
+    public Decision isRemoteStoreMigrationReplicaDecision(ShardRouting shardRouting, RoutingAllocation allocation) {
+        assert shardRouting.unassigned();
+        boolean primaryOnRemote = RemoteStoreMigrationAllocationDecider.isPrimaryOnRemote(shardRouting.shardId(), allocation);
+        if (shardRouting.primary() == false
+            && shardRouting.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED
+            && (compatibilityMode.equals(RemoteStoreNodeService.CompatibilityMode.MIXED))
+            && (migrationDirection.equals(RemoteStoreNodeService.Direction.REMOTE_STORE))
+            && primaryOnRemote == false) {
+            String explanation =
+                "in remote store migration, allocation filters are not applicable for replica copies whose primary is on doc rep node";
+            return allocation.decision(Decision.YES, NAME, explanation);
+        }
+        return null;
+    }
+
     @Override
     public Decision canAllocate(IndexMetadata indexMetadata, RoutingNode node, RoutingAllocation allocation) {
         return shouldFilter(indexMetadata, node.node(), allocation);
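Reading this together with the migration decider that follows: during a docrep-to-remote migration, a replica whose primary still runs on a document-replication node cannot yet be placed on a remote-store node (the replica decision below returns NO until the primary migrates). If routing filters are simultaneously being used to drain docrep nodes, such a replica would have no legal target, so the YES short-circuit above exempts these copies from filter checks and lets the migration decider drive placement. This reading follows from the two deciders shown here rather than from separate documentation.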
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java
index 27ebe5390ea6d..7d40aacb71e25 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java
@@ -39,6 +39,7 @@
 import org.opensearch.cluster.routing.allocation.RoutingAllocation;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.node.remotestore.RemoteStoreNodeService;
 import org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode;
 import org.opensearch.node.remotestore.RemoteStoreNodeService.Direction;
@@ -60,9 +61,8 @@ public class RemoteStoreMigrationAllocationDecider extends AllocationDecider {
 
     public static final String NAME = "remote_store_migration";
 
-    private Direction migrationDirection;
-    private CompatibilityMode compatibilityMode;
-    private boolean remoteStoreBackedIndex;
+    private volatile Direction migrationDirection;
+    private volatile CompatibilityMode compatibilityMode;
 
     public RemoteStoreMigrationAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
         this.migrationDirection = RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING.get(settings);
@@ -106,9 +106,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
 
         // check for remote store backed indices
         IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index());
-        if (IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.exists(indexMetadata.getSettings())) {
-            remoteStoreBackedIndex = IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexMetadata.getSettings());
-        }
+        boolean remoteStoreBackedIndex = IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexMetadata.getSettings());
         if (remoteStoreBackedIndex && targetNode.isRemoteStoreNode() == false) {
             // allocations and relocations must be to a remote node
             String reason = String.format(
@@ -133,15 +131,20 @@ private Decision primaryShardDecision(ShardRouting primaryShardRouting, DiscoveryNode targetNode, RoutingAllocation allocation) {
         return allocation.decision(Decision.YES, NAME, getDecisionDetails(true, primaryShardRouting, targetNode, ""));
     }
 
+    // Checks if the primary shard is on a remote node.
+    static boolean isPrimaryOnRemote(ShardId shardId, RoutingAllocation allocation) {
+        ShardRouting primaryShardRouting = allocation.routingNodes().activePrimary(shardId);
+        if (primaryShardRouting != null) {
+            DiscoveryNode primaryShardNode = allocation.nodes().getNodes().get(primaryShardRouting.currentNodeId());
+            return primaryShardNode.isRemoteStoreNode();
+        }
+        return false;
+    }
+
     private Decision replicaShardDecision(ShardRouting replicaShardRouting, DiscoveryNode targetNode, RoutingAllocation allocation) {
         if (targetNode.isRemoteStoreNode()) {
-            ShardRouting primaryShardRouting = allocation.routingNodes().activePrimary(replicaShardRouting.shardId());
-            boolean primaryHasMigratedToRemote = false;
-            if (primaryShardRouting != null) {
-                DiscoveryNode primaryShardNode = allocation.nodes().getNodes().get(primaryShardRouting.currentNodeId());
-                primaryHasMigratedToRemote = primaryShardNode.isRemoteStoreNode();
-            }
-            if (primaryHasMigratedToRemote == false) {
+            boolean primaryOnRemote = RemoteStoreMigrationAllocationDecider.isPrimaryOnRemote(replicaShardRouting.shardId(), allocation);
+            if (primaryOnRemote == false) {
                 return allocation.decision(
                     Decision.NO,
                     NAME,
diff --git a/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java b/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java
index 4c9881e845d42..e537ece759e65 100644
--- a/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java
+++ b/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java
@@ -12,6 +12,7 @@
 import org.opensearch.common.cache.RemovalListener;
 import org.opensearch.common.cache.policy.CachedQueryResult;
 import org.opensearch.common.cache.serializer.Serializer;
+import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
 
@@ -61,6 +62,8 @@ public class CacheConfig<K, V> {
     */
     private final TimeValue expireAfterAccess;
 
+    private final ClusterSettings clusterSettings;
+
     private CacheConfig(Builder<K, V> builder) {
         this.keyType = builder.keyType;
         this.valueType = builder.valueType;
@@ -72,6 +75,7 @@ private CacheConfig(Builder<K, V> builder) {
         this.cachedResultParser = builder.cachedResultParser;
         this.maxSizeInBytes = builder.maxSizeInBytes;
         this.expireAfterAccess = builder.expireAfterAccess;
+        this.clusterSettings = builder.clusterSettings;
     }
 
     public Class<K> getKeyType() {
@@ -114,6 +118,10 @@ public TimeValue getExpireAfterAccess() {
         return expireAfterAccess;
     }
 
+    public ClusterSettings getClusterSettings() {
+        return clusterSettings;
+    }
+
     /**
      * Builder class to build Cache config related parameters.
      * @param <K> Type of key.
@@ -138,6 +146,7 @@ public static class Builder<K, V> {
         private long maxSizeInBytes;
 
         private TimeValue expireAfterAccess;
+        private ClusterSettings clusterSettings;
 
         public Builder() {}
 
@@ -191,6 +200,11 @@ public Builder<K, V> setExpireAfterAccess(TimeValue expireAfterAccess) {
             return this;
         }
 
+        public Builder<K, V> setClusterSettings(ClusterSettings clusterSettings) {
+            this.clusterSettings = clusterSettings;
+            return this;
+        }
+
         public CacheConfig<K, V> build() {
             return new CacheConfig<>(this);
         }
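Threading ClusterSettings through the cache configuration lets a cache implementation register consumers for the dynamic settings being added in this change. A construction sketch; the surrounding variables (nodeSettings, clusterService) and the exact set of other builder setters are assumptions based on the builder shown above:

    // Pass ClusterSettings into the cache config so the cache can react to dynamic updates (sketch).
    CacheConfig<String, String> config = new CacheConfig.Builder<String, String>()
        .setKeyType(String.class)
        .setValueType(String.class)
        .setSettings(nodeSettings)                                // assumed in scope
        .setClusterSettings(clusterService.getClusterSettings())  // new in this change
        .build();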
diff --git a/server/src/main/java/org/opensearch/common/regex/Regex.java b/server/src/main/java/org/opensearch/common/regex/Regex.java
index 323b460af62df..6d8b5c3585c4c 100644
--- a/server/src/main/java/org/opensearch/common/regex/Regex.java
+++ b/server/src/main/java/org/opensearch/common/regex/Regex.java
@@ -35,6 +35,7 @@
 import org.apache.lucene.util.automaton.Automata;
 import org.apache.lucene.util.automaton.Automaton;
 import org.apache.lucene.util.automaton.Operations;
+import org.opensearch.common.Glob;
 import org.opensearch.core.common.Strings;
 
 import java.util.ArrayList;
@@ -125,39 +126,7 @@ public static boolean simpleMatch(String pattern, String str, boolean caseInsensitive) {
             pattern = Strings.toLowercaseAscii(pattern);
             str = Strings.toLowercaseAscii(str);
         }
-        return simpleMatchWithNormalizedStrings(pattern, str);
-    }
-
-    private static boolean simpleMatchWithNormalizedStrings(String pattern, String str) {
-        int sIdx = 0, pIdx = 0, match = 0, wildcardIdx = -1;
-        while (sIdx < str.length()) {
-            // both chars matching, incrementing both pointers
-            if (pIdx < pattern.length() && str.charAt(sIdx) == pattern.charAt(pIdx)) {
-                sIdx++;
-                pIdx++;
-            } else if (pIdx < pattern.length() && pattern.charAt(pIdx) == '*') {
-                // wildcard found, only incrementing pattern pointer
-                wildcardIdx = pIdx;
-                match = sIdx;
-                pIdx++;
-            } else if (wildcardIdx != -1) {
-                // last pattern pointer was a wildcard, incrementing string pointer
-                pIdx = wildcardIdx + 1;
-                match++;
-                sIdx = match;
-            } else {
-                // current pattern pointer is not a wildcard, last pattern pointer was also not a wildcard
-                // characters do not match
-                return false;
-            }
-        }
-
-        // check for remaining characters in pattern
-        while (pIdx < pattern.length() && pattern.charAt(pIdx) == '*') {
-            pIdx++;
-        }
-
-        return pIdx == pattern.length();
+        return Glob.globMatch(pattern, str);
     }
 
     /**
diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
index 7c69a0487b618..ccf7fd9c93f29 100644
--- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
@@ -84,6 +84,7 @@
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.cache.CacheType;
 import org.opensearch.common.cache.settings.CacheSettings;
+import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings;
 import org.opensearch.common.logging.Loggers;
 import org.opensearch.common.network.NetworkModule;
 import org.opensearch.common.network.NetworkService;
@@ -253,7 +254,9 @@ public void apply(Settings value, Settings current, Settings previous) {
                 AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING,
                 BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
                 BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
+                BalancedShardsAllocator.PRIMARY_SHARD_REBALANCE_BUFFER,
                 BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE,
+                BalancedShardsAllocator.PREFER_PRIMARY_SHARD_REBALANCE,
                 BalancedShardsAllocator.SHARD_MOVE_PRIMARY_FIRST_SETTING,
                 BalancedShardsAllocator.SHARD_MOVEMENT_STRATEGY_SETTING,
                 BalancedShardsAllocator.THRESHOLD_SETTING,
@@ -290,6 +293,7 @@ public void apply(Settings value, Settings current, Settings previous) {
                 ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_CLUSTER,
                 ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES,
                 RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
+                RecoverySettings.INDICES_REPLICATION_MAX_BYTES_PER_SEC_SETTING,
                 RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
                 RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,
                 RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING,
@@ -748,6 +752,14 @@ public void apply(Settings value, Settings current, Settings previous) {
             TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING
         ),
         List.of(FeatureFlags.PLUGGABLE_CACHE),
-        List.of(CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE))
+        List.of(
+            CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE),
+            OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES.getConcreteSettingForNamespace(
+                CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()
+            ),
+            OpenSearchOnHeapCacheSettings.EXPIRE_AFTER_ACCESS_SETTING.getConcreteSettingForNamespace(
+                CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()
+            )
+        )
     );
 }
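Stepping back to the Regex.java hunk above: the hand-rolled matcher is replaced by the shared Glob utility, with identical glob semantics. A few illustrative calls against the public simpleMatch API shown in that hunk:

    // Behavior is unchanged after delegating to Glob.globMatch.
    assert Regex.simpleMatch("foo*bar", "foo-anything-bar");        // '*' spans any run of characters
    assert Regex.simpleMatch("FOO*", "foo-index", true);            // case-insensitive variant
    assert Regex.simpleMatch("foo*bar", "foobaz") == false;         // must still end with "bar"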
+ * + * @opensearch.internal + */ +public abstract class AsyncShardBatchFetch extends AsyncShardFetch { + + @SuppressWarnings("unchecked") + AsyncShardBatchFetch( + Logger logger, + String type, + Map shardAttributesMap, + AsyncShardFetch.Lister, T> action, + String batchId, + Class clazz, + V emptyShardResponse, + Predicate emptyShardResponsePredicate, + ShardBatchResponseFactory responseFactory + ) { + super( + logger, + type, + shardAttributesMap, + action, + batchId, + new ShardBatchCache<>( + logger, + type, + shardAttributesMap, + "BatchID=[" + batchId + "]", + clazz, + emptyShardResponse, + emptyShardResponsePredicate, + responseFactory + ) + ); + } + + /** + * Remove a shard from the cache maintaining a full batch of shards. This is needed to clear the shard once it's + * assigned or failed. + * + * @param shardId shardId to be removed from the batch. + */ + public synchronized void clearShard(ShardId shardId) { + this.shardAttributesMap.remove(shardId); + this.cache.deleteShard(shardId); + } + + /** + * Cache implementation of transport actions returning batch of shards related data in the response. + * Store node level responses of transport actions like {@link TransportNodesListGatewayStartedShardsBatch} or + * {@link org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch} with memory efficient caching + * approach. This cache class is not thread safe, all of its methods are being called from + * {@link AsyncShardFetch} class which has synchronized blocks present to handle multiple threads. + * + * @param Response type of transport action. + * @param Data type of shard level response. + */ + static class ShardBatchCache extends AsyncShardFetchCache { + private final Map> cache; + private final Map shardIdToArray; + private final int batchSize; + private final Class shardResponseClass; + private final ShardBatchResponseFactory responseFactory; + private final V emptyResponse; + private final Predicate emptyShardResponsePredicate; + private final Logger logger; + + public ShardBatchCache( + Logger logger, + String type, + Map shardAttributesMap, + String logKey, + Class clazz, + V emptyResponse, + Predicate emptyShardResponsePredicate, + ShardBatchResponseFactory responseFactory + ) { + super(Loggers.getLogger(logger, "_" + logKey), type); + this.batchSize = shardAttributesMap.size(); + this.emptyShardResponsePredicate = emptyShardResponsePredicate; + cache = new HashMap<>(); + shardIdToArray = new HashMap<>(); + fillShardIdKeys(shardAttributesMap.keySet()); + this.shardResponseClass = clazz; + this.emptyResponse = emptyResponse; + this.logger = logger; + this.responseFactory = responseFactory; + } + + @Override + @NonNull + public Map getCache() { + return cache; + } + + @Override + public void deleteShard(ShardId shardId) { + if (shardIdToArray.containsKey(shardId)) { + Integer shardIdIndex = shardIdToArray.remove(shardId); + for (String nodeId : cache.keySet()) { + cache.get(nodeId).clearShard(shardIdIndex); + } + } + } + + @Override + public void initData(DiscoveryNode node) { + cache.put(node.getId(), new NodeEntry<>(node.getId(), shardResponseClass, batchSize, emptyShardResponsePredicate)); + } + + /** + * Put the response received from data nodes into the cache. + * Get shard level data from batch, then filter out if any shards received failures. + * After that complete storing the data at node level and mark fetching as done. + * + * @param node node from which we got the response. + * @param response shard metadata coming from node. 
+         */
+        @Override
+        public void putData(DiscoveryNode node, T response) {
+            NodeEntry<V> nodeEntry = cache.get(node.getId());
+            Map<ShardId, V> batchResponse = responseFactory.getShardBatchData(response);
+            nodeEntry.doneFetching(batchResponse, shardIdToArray);
+        }
+
+        @Override
+        public T getData(DiscoveryNode node) {
+            return this.responseFactory.getNewResponse(node, getBatchData(cache.get(node.getId())));
+        }
+
+        private HashMap<ShardId, V> getBatchData(NodeEntry<V> nodeEntry) {
+            V[] nodeShardEntries = nodeEntry.getData();
+            boolean[] emptyResponses = nodeEntry.getEmptyShardResponse();
+            HashMap<ShardId, V> shardData = new HashMap<>();
+            for (Map.Entry<ShardId, Integer> shardIdEntry : shardIdToArray.entrySet()) {
+                ShardId shardId = shardIdEntry.getKey();
+                Integer arrIndex = shardIdEntry.getValue();
+                if (emptyResponses[arrIndex]) {
+                    shardData.put(shardId, emptyResponse);
+                } else if (nodeShardEntries[arrIndex] != null) {
+                    // ignore null responses here
+                    shardData.put(shardId, nodeShardEntries[arrIndex]);
+                }
+            }
+            return shardData;
+        }
+
+        private void fillShardIdKeys(Set<ShardId> shardIds) {
+            int shardIdIndex = 0;
+            for (ShardId shardId : shardIds) {
+                this.shardIdToArray.putIfAbsent(shardId, shardIdIndex++);
+            }
+        }
+
+        /**
+         * A node entry, holding the state of the fetched data for a specific shard
+         * for a given node.
+         */
+        static class NodeEntry<V> extends BaseNodeEntry {
+            private final V[] shardData;
+            private final boolean[] emptyShardResponse; // we can not rely on null entries of the shardData array;
+            // those null entries mean that we need to ignore those entries. Empty responses, on the other hand, are
+            // actually needed in the allocation/explain API response. So instead of storing the full empty response object
+            // in cache, it's better to just store a boolean and create that object on the fly just before
+            // decision-making.
+            private final Predicate<V> emptyShardResponsePredicate;
+
+            NodeEntry(String nodeId, Class<V> clazz, int batchSize, Predicate<V> emptyShardResponsePredicate) {
+                super(nodeId);
+                this.shardData = (V[]) Array.newInstance(clazz, batchSize);
+                this.emptyShardResponse = new boolean[batchSize];
+                this.emptyShardResponsePredicate = emptyShardResponsePredicate;
+            }
+
+            void doneFetching(Map<ShardId, V> shardDataFromNode, Map<ShardId, Integer> shardIdKey) {
+                fillShardData(shardDataFromNode, shardIdKey);
+                super.doneFetching();
+            }
+
+            void clearShard(Integer shardIdIndex) {
+                this.shardData[shardIdIndex] = null;
+                emptyShardResponse[shardIdIndex] = false;
+            }
+
+            V[] getData() {
+                return this.shardData;
+            }
+
+            boolean[] getEmptyShardResponse() {
+                return emptyShardResponse;
+            }
+
+            private void fillShardData(Map<ShardId, V> shardDataFromNode, Map<ShardId, Integer> shardIdKey) {
+                for (Map.Entry<ShardId, V> shardData : shardDataFromNode.entrySet()) {
+                    if (shardData.getValue() != null) {
+                        ShardId shardId = shardData.getKey();
+                        if (emptyShardResponsePredicate.test(shardData.getValue())) {
+                            this.emptyShardResponse[shardIdKey.get(shardId)] = true;
+                            this.shardData[shardIdKey.get(shardId)] = null;
+                        } else {
+                            this.shardData[shardIdKey.get(shardId)] = shardData.getValue();
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java b/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java
index 3d129d4794a10..b664dd573ce67 100644
--- a/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java
+++ b/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java
@@ -82,10 +82,10 @@ public interface Lister<NodesResponse extends BaseNodesResponse<NodeResponse>, N
     protected final String type;
     protected final Map<ShardId, ShardAttributes> shardAttributesMap;
     private final Lister<? extends BaseNodesResponse<T>, T> action;
-    private final AsyncShardFetchCache<T> cache;
+    protected final AsyncShardFetchCache<T> cache;
     private final AtomicLong round = new AtomicLong();
     private boolean closed;
-    private final String reroutingKey;
+    final String reroutingKey;
     private final Map<ShardId, Set<String>> shardToIgnoreNodes = new HashMap<>();

     @SuppressWarnings("unchecked")
@@ -99,7 +99,7 @@ protected AsyncShardFetch(
         this.logger = logger;
         this.type = type;
         shardAttributesMap = new HashMap<>();
-        shardAttributesMap.put(shardId, new ShardAttributes(shardId, customDataPath));
+        shardAttributesMap.put(shardId, new ShardAttributes(customDataPath));
         this.action = (Lister<BaseNodesResponse<T>, T>) action;
         this.reroutingKey = "ShardId=[" + shardId.toString() + "]";
         cache = new ShardCache<>(logger, reroutingKey, type);
@@ -120,14 +120,15 @@ protected AsyncShardFetch(
         String type,
         Map<ShardId, ShardAttributes> shardAttributesMap,
         Lister<? extends BaseNodesResponse<T>, T> action,
-        String batchId
+        String batchId,
+        AsyncShardFetchCache<T> cache
     ) {
         this.logger = logger;
         this.type = type;
         this.shardAttributesMap = shardAttributesMap;
         this.action = (Lister<BaseNodesResponse<T>, T>) action;
         this.reroutingKey = "BatchID=[" + batchId + "]";
-        cache = new ShardCache<>(logger, reroutingKey, type);
+        this.cache = cache;
     }

     @Override
diff --git a/server/src/main/java/org/opensearch/gateway/AsyncShardFetchCache.java b/server/src/main/java/org/opensearch/gateway/AsyncShardFetchCache.java
index 3140ceef4f3ee..2a4e6181467b0 100644
--- a/server/src/main/java/org/opensearch/gateway/AsyncShardFetchCache.java
+++ b/server/src/main/java/org/opensearch/gateway/AsyncShardFetchCache.java
@@ -48,6 +48,7 @@
  * @opensearch.internal
  */
 public abstract class AsyncShardFetchCache<K extends BaseNodeResponse> {
+
     private final Logger logger;
     private final String type;
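The ShardBatchCache introduced above replaces per-shard cache objects with one fixed-size slot array per node: every ShardId is pinned to an array index once (fillShardIdKeys), a null slot means the node returned nothing usable, and a parallel boolean[] records genuinely empty responses so the full empty object can be rebuilt on demand for allocation decisions. A standalone sketch of that bookkeeping, with all names invented for illustration:

import java.lang.reflect.Array;
import java.util.function.Predicate;

// Minimal sketch of NodeEntry's slot-array scheme (names are not from the PR).
final class SlotCacheSketch<V> {
    private final V[] data;
    private final boolean[] empty;
    private final Predicate<V> isEmpty;

    @SuppressWarnings("unchecked")
    SlotCacheSketch(Class<V> clazz, int size, Predicate<V> isEmpty) {
        this.data = (V[]) Array.newInstance(clazz, size);
        this.empty = new boolean[size];
        this.isEmpty = isEmpty;
    }

    void put(int slot, V value) {
        if (value == null) {
            return; // null => nothing usable from this node, slot stays cleared
        }
        if (isEmpty.test(value)) {
            empty[slot] = true; // remember only the fact; rebuild the object lazily
            data[slot] = null;
        } else {
            data[slot] = value;
        }
    }

    V get(int slot, V emptySingleton) {
        // The shared "empty" instance is materialized only at read time.
        return empty[slot] ? emptySingleton : data[slot];
    }
}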
diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java
index 8d222903b6f29..1979f33484d49 100644
--- a/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java
+++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardBatchAllocator.java
@@ -15,6 +15,7 @@
 import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision;
 import org.opensearch.cluster.routing.allocation.RoutingAllocation;
 import org.opensearch.gateway.AsyncShardFetch.FetchResult;
+import org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.GatewayStartedShard;
 import org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.NodeGatewayStartedShard;
 import org.opensearch.gateway.TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch;

@@ -132,9 +133,7 @@ private static List<NodeGatewayStartedShard> adaptToNodeShardStates(
         // build data for a shard from all the nodes
         nodeResponses.forEach((node, nodeGatewayStartedShardsBatch) -> {
-            TransportNodesGatewayStartedShardHelper.GatewayStartedShard shardData = nodeGatewayStartedShardsBatch
-                .getNodeGatewayStartedShardsBatch()
-                .get(unassignedShard.shardId());
+            GatewayStartedShard shardData = nodeGatewayStartedShardsBatch.getNodeGatewayStartedShardsBatch().get(unassignedShard.shardId());
             nodeShardStates.add(
                 new NodeGatewayStartedShard(
                     shardData.allocationId(),
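The file added next centralizes response construction for the two batch transport actions, so the generic fetch/cache machinery above never needs to know which concrete response type it is handling. A hedged usage sketch, relying only on types that appear elsewhere in this diff:

import org.opensearch.gateway.ShardBatchResponseFactory;
import org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.GatewayStartedShard;
import org.opensearch.gateway.TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch;
import org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata;
import org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch;

public class FactoryUsageSketch {
    public static void main(String[] args) {
        // Primary-shard path: T = NodeGatewayStartedShardsBatch, V = GatewayStartedShard.
        ShardBatchResponseFactory<NodeGatewayStartedShardsBatch, GatewayStartedShard> primaryFactory =
            new ShardBatchResponseFactory<>(true);
        // Store-metadata path: T = NodeStoreFilesMetadataBatch, V = NodeStoreFilesMetadata.
        ShardBatchResponseFactory<NodeStoreFilesMetadataBatch, NodeStoreFilesMetadata> storeFactory =
            new ShardBatchResponseFactory<>(false);
        System.out.println(primaryFactory + " / " + storeFactory);
    }
}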
+ */
+public class ShardBatchResponseFactory<T extends BaseNodeResponse, V> {
+    private final boolean primary;
+
+    public ShardBatchResponseFactory(boolean primary) {
+        this.primary = primary;
+    }
+
+    public T getNewResponse(DiscoveryNode node, Map<ShardId, V> shardData) {
+        if (primary) {
+            return (T) new NodeGatewayStartedShardsBatch(node, (Map<ShardId, GatewayStartedShard>) shardData);
+        } else {
+            return (T) new NodeStoreFilesMetadataBatch(node, (Map<ShardId, NodeStoreFilesMetadata>) shardData);
+        }
+    }
+
+    public Map<ShardId, V> getShardBatchData(T response) {
+        if (primary) {
+            return (Map<ShardId, V>) ((NodeGatewayStartedShardsBatch) response).getNodeGatewayStartedShardsBatch();
+        } else {
+            return (Map<ShardId, V>) ((NodeStoreFilesMetadataBatch) response).getNodeStoreFilesMetadataBatch();
+        }
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesGatewayStartedShardHelper.java b/server/src/main/java/org/opensearch/gateway/TransportNodesGatewayStartedShardHelper.java
index 27cce76b1b694..2ddae1d8410c9 100644
--- a/server/src/main/java/org/opensearch/gateway/TransportNodesGatewayStartedShardHelper.java
+++ b/server/src/main/java/org/opensearch/gateway/TransportNodesGatewayStartedShardHelper.java
@@ -42,6 +42,8 @@
  * @opensearch.internal
  */
 public class TransportNodesGatewayStartedShardHelper {
+    public static final String INDEX_NOT_FOUND = "node doesn't have meta data for index";
+
     public static GatewayStartedShard getShardInfoOnLocalNode(
         Logger logger,
         final ShardId shardId,
@@ -72,7 +74,7 @@ public static GatewayStartedShard getShardInfoOnLocalNode(
                 customDataPath = new IndexSettings(metadata, settings).customDataPath();
             } else {
                 logger.trace("{} node doesn't have meta data for the requests index", shardId);
-                throw new OpenSearchException("node doesn't have meta data for index " + shardId.getIndex());
+                throw new OpenSearchException(INDEX_NOT_FOUND + " " + shardId.getIndex());
             }
         }
         // we don't have an open shard on the store, validate the files on disk are openable
@@ -230,6 +232,13 @@ public String toString() {
             buf.append("]");
             return buf.toString();
         }
+
+        public static boolean isEmpty(GatewayStartedShard gatewayStartedShard) {
+            return gatewayStartedShard.allocationId() == null
+                && gatewayStartedShard.primary() == false
+                && gatewayStartedShard.storeException() == null
+                && gatewayStartedShard.replicationCheckpoint() == null;
+        }
     }

     /**
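The isEmpty helper added above is the natural source for the emptyShardResponsePredicate that AsyncShardBatchFetch expects. A small hedged sketch of that wiring; the all-null sentinel below assumes the four-argument constructor used elsewhere in this diff:

import java.util.function.Predicate;

import org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.GatewayStartedShard;

public class EmptyPredicateSketch {
    public static void main(String[] args) {
        // The static helper doubles as the batch cache's empty-response test.
        Predicate<GatewayStartedShard> emptyPredicate = GatewayStartedShard::isEmpty;
        // An all-null sentinel is the kind of emptyShardResponse that
        // ShardBatchCache rebuilds lazily from its boolean[] markers.
        GatewayStartedShard sentinel = new GatewayStartedShard(null, false, null, null);
        System.out.println(emptyPredicate.test(sentinel)); // true
    }
}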
diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShardsBatch.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShardsBatch.java
index dad9cd94d9ba7..44aa49a8defa6 100644
--- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShardsBatch.java
+++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShardsBatch.java
@@ -8,7 +8,6 @@

 package org.opensearch.gateway;

-import org.opensearch.OpenSearchException;
 import org.opensearch.action.ActionType;
 import org.opensearch.action.FailedNodeException;
 import org.opensearch.action.support.ActionFilters;
@@ -28,7 +27,6 @@
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.env.NodeEnvironment;
-import org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.GatewayStartedShard;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.store.ShardAttributes;
 import org.opensearch.threadpool.ThreadPool;
@@ -40,6 +38,8 @@
 import java.util.Map;
 import java.util.Objects;

+import static org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.GatewayStartedShard;
+import static org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.INDEX_NOT_FOUND;
 import static org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.getShardInfoOnLocalNode;

 /**
@@ -136,8 +136,10 @@ protected NodesGatewayStartedShardsBatch newResponse(
     @Override
     protected NodeGatewayStartedShardsBatch nodeOperation(NodeRequest request) {
         Map<ShardId, GatewayStartedShard> shardsOnNode = new HashMap<>();
-        for (ShardAttributes shardAttr : request.shardAttributes.values()) {
-            final ShardId shardId = shardAttr.getShardId();
+        // NOTE : If we ever change this for loop to run in parallel threads, we should re-visit the exception
+        // handling in AsyncShardBatchFetch class.
+        for (Map.Entry<ShardId, ShardAttributes> shardAttr : request.shardAttributes.entrySet()) {
+            final ShardId shardId = shardAttr.getKey();
             try {
                 shardsOnNode.put(
                     shardId,
@@ -147,16 +149,19 @@ protected NodeGatewayStartedShardsBatch nodeOperation(NodeRequest request) {
                         namedXContentRegistry,
                         nodeEnv,
                         indicesService,
-                        shardAttr.getCustomDataPath(),
+                        shardAttr.getValue().getCustomDataPath(),
                         settings,
                         clusterService
                     )
                 );
             } catch (Exception e) {
-                shardsOnNode.put(
-                    shardId,
-                    new GatewayStartedShard(null, false, null, new OpenSearchException("failed to load started shards", e))
-                );
+                // should return null in case of known exceptions being returned from getShardInfoOnLocalNode method.
+                if (e instanceof IllegalStateException || e.getMessage().contains(INDEX_NOT_FOUND) || e instanceof IOException) {
+                    shardsOnNode.put(shardId, null);
+                } else {
+                    // return actual exception as it is for unknown exceptions
+                    shardsOnNode.put(shardId, new GatewayStartedShard(null, false, null, e));
+                }
             }
         }
         return new NodeGatewayStartedShardsBatch(clusterService.localNode(), shardsOnNode);
@@ -264,13 +269,26 @@ public Map<ShardId, GatewayStartedShard> getNodeGatewayStartedShardsBatch() {

         public NodeGatewayStartedShardsBatch(StreamInput in) throws IOException {
             super(in);
-            this.nodeGatewayStartedShardsBatch = in.readMap(ShardId::new, GatewayStartedShard::new);
+            this.nodeGatewayStartedShardsBatch = in.readMap(ShardId::new, i -> {
+                if (i.readBoolean()) {
+                    return new GatewayStartedShard(i);
+                } else {
+                    return null;
+                }
+            });
         }

         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
-            out.writeMap(nodeGatewayStartedShardsBatch, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o));
+            out.writeMap(nodeGatewayStartedShardsBatch, (o, k) -> k.writeTo(o), (o, v) -> {
+                if (v != null) {
+                    o.writeBoolean(true);
+                    v.writeTo(o);
+                } else {
+                    o.writeBoolean(false);
+                }
+            });
         }

         public NodeGatewayStartedShardsBatch(DiscoveryNode node, Map<ShardId, GatewayStartedShard> nodeGatewayStartedShardsBatch) {
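The serialization change above makes the per-shard value optional on the wire: a boolean tag precedes each value, so a null entry (a known, retryable failure) survives the node-to-cluster-manager round trip. A standalone sketch of the same encoding using String values instead of GatewayStartedShard; package locations for the stream classes are assumed for the 2.x line:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.core.common.io.stream.StreamInput;

public class NullableMapWireSketch {
    public static void main(String[] args) throws IOException {
        Map<String, String> map = new HashMap<>();
        map.put("shard-0", "started");
        map.put("shard-1", null); // known-failure case, deliberately kept as null

        try (BytesStreamOutput out = new BytesStreamOutput()) {
            // One boolean tag per value marks whether a real value follows.
            out.writeMap(map, (o, k) -> o.writeString(k), (o, v) -> {
                o.writeBoolean(v != null);
                if (v != null) {
                    o.writeString(v);
                }
            });
            StreamInput in = out.bytes().streamInput();
            Map<String, String> read = in.readMap(StreamInput::readString, i -> i.readBoolean() ? i.readString() : null);
            System.out.println(read); // e.g. {shard-0=started, shard-1=null}
        }
    }
}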
diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java
index 3c22baf3c1563..900aab53233c8 100644
--- a/server/src/main/java/org/opensearch/index/IndexSettings.java
+++ b/server/src/main/java/org/opensearch/index/IndexSettings.java
@@ -759,6 +759,7 @@ public static IndexMergePolicy fromString(String text) {
     private volatile String defaultSearchPipeline;

     private final boolean widenIndexSortType;
+    private final boolean assignedOnRemoteNode;

     /**
      * The maximum age of a retention lease before it is considered expired.
@@ -981,6 +982,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti
          * Now this sortField (IndexSort) is stored in SegmentInfo and we need to maintain backward compatibility for them.
          */
         widenIndexSortType = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).before(V_2_7_0);
+        assignedOnRemoteNode = RemoteStoreNodeAttribute.isRemoteDataAttributePresent(this.getNodeSettings());

         setEnableFuzzySetForDocId(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING));
         setDocIdFuzzySetFalsePositiveProbability(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING));
@@ -1226,7 +1228,7 @@ public int getNumberOfReplicas() {
      * proper index setting during the migration.
      */
     public boolean isSegRepEnabledOrRemoteNode() {
-        return ReplicationType.SEGMENT.equals(replicationType) || isRemoteNode();
+        return ReplicationType.SEGMENT.equals(replicationType) || isAssignedOnRemoteNode();
     }

     public boolean isSegRepLocalEnabled() {
@@ -1244,8 +1246,8 @@ public boolean isRemoteStoreEnabled() {
         return isRemoteStoreEnabled;
     }

-    public boolean isRemoteNode() {
-        return RemoteStoreNodeAttribute.isRemoteDataAttributePresent(this.getNodeSettings());
+    public boolean isAssignedOnRemoteNode() {
+        return assignedOnRemoteNode;
     }

     /**
diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java
index 961780348f07a..eefcfa66cb412 100644
--- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java
+++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java
@@ -489,7 +489,7 @@ protected final void closeNoLock(String reason, CountDownLatch closedLatch) {
          during promotion.
          */
         if (engineConfig.getIndexSettings().isRemoteStoreEnabled() == false
-            && engineConfig.getIndexSettings().isRemoteNode() == false) {
+            && engineConfig.getIndexSettings().isAssignedOnRemoteNode() == false) {
             latestSegmentInfos.counter = latestSegmentInfos.counter + SI_COUNTER_INCREMENT;
             latestSegmentInfos.changed();
         }
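The next new file defines the DerivedField named in the changelog entry for query-time derived fields. A hedged usage sketch of its XContent output follows; the field name and script source are invented, and the exact JSON shape depends on Script's own serialization:

import org.opensearch.common.xcontent.XContentFactory;
import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.core.xcontent.ToXContent;
import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.index.mapper.DerivedField;
import org.opensearch.script.Script;

public class DerivedFieldSketch {
    public static void main(String[] args) throws Exception {
        // Script(String) builds an inline Painless script from just the source code.
        DerivedField field = new DerivedField("ip", "keyword", new Script("emit(doc['message'].value)"));
        XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
        field.toXContent(builder, ToXContent.EMPTY_PARAMS);
        builder.endObject();
        // Expected shape, roughly:
        // {"ip":{"type":"keyword","script":{"source":"emit(doc['message'].value)","lang":"painless"}}}
        System.out.println(BytesReference.bytes(builder).utf8ToString());
    }
}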
diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedField.java b/server/src/main/java/org/opensearch/index/mapper/DerivedField.java
new file mode 100644
index 0000000000000..7ebe4e5f0b0e8
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/mapper/DerivedField.java
@@ -0,0 +1,90 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.mapper;
+
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.ToXContentFragment;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.script.Script;
+
+import java.io.IOException;
+import java.util.Objects;
+
+/**
+ * DerivedField representation: expects a name, type and script.
+ */
+@PublicApi(since = "2.14.0")
+public class DerivedField implements Writeable, ToXContentFragment {
+
+    private final String name;
+    private final String type;
+    private final Script script;
+
+    public DerivedField(String name, String type, Script script) {
+        this.name = name;
+        this.type = type;
+        this.script = script;
+    }
+
+    public DerivedField(StreamInput in) throws IOException {
+        name = in.readString();
+        type = in.readString();
+        script = new Script(in);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(name);
+        out.writeString(type);
+        script.writeTo(out);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+        builder.startObject(name);
+        builder.field("type", type);
+        builder.field("script", script);
+        builder.endObject();
+        return builder;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public String getType() {
+        return type;
+    }
+
+    public Script getScript() {
+        return script;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(name, type, script);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null) {
+            return false;
+        }
+        if (getClass() != obj.getClass()) {
+            return false;
+        }
+        DerivedField other = (DerivedField) obj;
+        return Objects.equals(name, other.name) && Objects.equals(type, other.type) && Objects.equals(script, other.script);
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java
new file mode 100644
index 0000000000000..c6ae71320c35c
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java
@@ -0,0 +1,158 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.mapper;
+
+import org.apache.lucene.index.IndexableField;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.script.Script;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+
+/**
+ * A field mapper for derived fields
+ *
+ * @opensearch.internal
+ */
+public class DerivedFieldMapper extends ParametrizedFieldMapper {
+
+    public static final String CONTENT_TYPE = "derived";
+
+    private static DerivedFieldMapper toType(FieldMapper in) {
+        return (DerivedFieldMapper) in;
+    }
+
+    /**
+     * Builder for this field mapper
+     *
+     * @opensearch.internal
+     */
+    public static class Builder extends ParametrizedFieldMapper.Builder {
+        // TODO: The type of parameter may change here if the actual underlying FieldType object is needed
+        private final Parameter<String> type = Parameter.stringParam("type", false, m -> toType(m).type, "");
+
+        private final Parameter