diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 144a8b71fca39..6a5db93053e3b 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -26,4 +26,5 @@ BWC_VERSION:
   - "2.10.1"
   - "2.11.0"
   - "2.11.1"
+  - "2.11.2"
   - "2.12.0"
diff --git a/.gitattributes b/.gitattributes
index 65f909981595f..b74462afb27bd 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,4 +1,5 @@
 * text eol=lf
+*.jar binary
 *.bat binary
 *.zip binary
 *.exe binary
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 0defcaf727771..8c4f4d59ea1fc 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -711,6 +711,14 @@ updates:
     labels:
       - "dependabot"
       - "dependencies"
+  - directory: /modules/crypto/
+    open-pull-requests-limit: 1
+    package-ecosystem: gradle
+    schedule:
+      interval: weekly
+    labels:
+      - "dependabot"
+      - "dependencies"
   - directory: /plugins/
     open-pull-requests-limit: 1
     package-ecosystem: gradle
diff --git a/.github/workflows/add-untriaged.yml b/.github/workflows/add-untriaged.yml
index 11db8b9a61f50..38de96f663051 100644
--- a/.github/workflows/add-untriaged.yml
+++ b/.github/workflows/add-untriaged.yml
@@ -9,7 +9,7 @@ jobs:
     if: github.repository == 'opensearch-project/OpenSearch'
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/github-script@v6
+      - uses: actions/github-script@v7
        with:
          script: |
            github.rest.issues.addLabels({
diff --git a/.github/workflows/assemble.yml b/.github/workflows/assemble.yml
index 87cecdf38c072..382105364c048 100644
--- a/.github/workflows/assemble.yml
+++ b/.github/workflows/assemble.yml
@@ -12,7 +12,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Set up JDK ${{ matrix.java }}
-        uses: actions/setup-java@v3
+        uses: actions/setup-java@v4
        with:
          java-version: ${{ matrix.java }}
          distribution: temurin
diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml
index 2a5e539b214d3..e6feb3b852ad0 100644
--- a/.github/workflows/dependabot_pr.yml
+++ b/.github/workflows/dependabot_pr.yml
@@ -27,7 +27,7 @@ jobs:
          ./gradlew updateSHAs
      - name: Commit the changes
-        uses: stefanzweifel/git-auto-commit-action@v4.7.2
+        uses: stefanzweifel/git-auto-commit-action@v5
        with:
          commit_message: Updating SHAs
          branch: ${{ github.head_ref }}
@@ -40,7 +40,7 @@ jobs:
          ./gradlew spotlessApply
      - name: Commit the changes
-        uses: stefanzweifel/git-auto-commit-action@v4.7.2
+        uses: stefanzweifel/git-auto-commit-action@v5
        with:
          commit_message: Spotless formatting
          branch: ${{ github.head_ref }}
@@ -54,7 +54,7 @@ jobs:
          version: 'Unreleased 2.x'
      - name: Commit the changes
-        uses: stefanzweifel/git-auto-commit-action@v4
+        uses: stefanzweifel/git-auto-commit-action@v5
        with:
          commit_message: "Update changelog"
          branch: ${{ github.head_ref }}
diff --git a/.github/workflows/lucene-snapshots.yml b/.github/workflows/lucene-snapshots.yml
index ec5893ea546a9..05ca93e7be2aa 100644
--- a/.github/workflows/lucene-snapshots.yml
+++ b/.github/workflows/lucene-snapshots.yml
@@ -7,7 +7,8 @@ on:
    # Inputs the workflow accepts.
    inputs:
      ref:
-        description:
+        description: 'Lucene ref in github.com/apache/lucene'
+        type: string
        required: false
        default: 'main'
@@ -21,33 +22,52 @@ jobs:
      contents: read
    steps:
-      - uses: actions/checkout@v4
-      - name: Set up JDK 17
-        uses: actions/setup-java@v3
-        with:
-          java-version: '17'
-          distribution: 'adopt'
-
-      - name: Checkout Lucene
+      - name: Checkout Lucene ref:${{ github.event.inputs.ref }}
        uses: actions/checkout@v4
        with:
          repository: 'apache/lucene'
-          path: lucene
          ref: ${{ github.event.inputs.ref }}
-      - name: Set hash
-        working-directory: ./lucene
+      - name: Get Java Min Version and Lucene Revision from Lucene Repository
        run: |
-          echo "REVISION=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
-        id: version
+          java_version=`cat build.gradle | grep minJavaVersion | head -1 | grep -Eo '_[0-9]+$' | tr -d '_'`
+          echo "JAVA_VERSION=$java_version" >> $GITHUB_ENV
+          echo "REVISION=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
+
+      - name: Setup JDK ${{ env.JAVA_VERSION }}
+        uses: actions/setup-java@v4
+        with:
+          java-version: ${{ env.JAVA_VERSION }}
+          distribution: 'temurin'
      - name: Initialize gradle settings
-        working-directory: ./lucene
        run: ./gradlew localSettings
      - name: Publish Lucene to local maven repo.
-        working-directory: ./lucene
-        run: ./gradlew publishJarsPublicationToMavenLocal -Pversion.suffix=snapshot-${{ steps.version.outputs.REVISION }}
+        run: ./gradlew publishJarsPublicationToMavenLocal -Pversion.suffix=snapshot-${{ env.REVISION }}
+
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          role-to-assume: ${{ secrets.LUCENE_SNAPSHOTS_SECRET_ROLE }}
+          aws-region: us-east-1
+
+      - name: Get S3 Bucket
+        id: get_s3_bucket
+        run: |
+          lucene_snapshots_bucket=`aws secretsmanager get-secret-value --secret-id jenkins-artifact-bucket-name --query SecretString --output text`
+          echo "::add-mask::$lucene_snapshots_bucket"
+          echo "LUCENE_SNAPSHOTS_BUCKET=$lucene_snapshots_bucket" >> $GITHUB_OUTPUT
+
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          role-to-assume: ${{ secrets.LUCENE_SNAPSHOTS_S3_ROLE }}
+          aws-region: us-east-1
+
+      - name: Copy files to S3 with the aws CLI (New)
+        run: |
+          aws s3 cp ~/.m2/repository/org/apache/lucene/ s3://${{ steps.get_s3_bucket.outputs.LUCENE_SNAPSHOTS_BUCKET }}/snapshots/lucene/org/apache/lucene/ --recursive --no-progress
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
@@ -55,6 +75,7 @@ jobs:
          role-to-assume: ${{ secrets.LUCENE_SNAPSHOTS_ROLE }}
          aws-region: us-west-2
-      - name: Copy files to S3 with the aws CLI.
+      # We will remove this step once all the old Lucene snapshot links have been updated to the new location
+      - name: Copy files to S3 with the aws CLI (Old)
        run: |
          aws s3 cp ~/.m2/repository/org/apache/lucene/ s3://${{ secrets.LUCENE_SNAPSHOTS_BUCKET }}/snapshots/lucene/org/apache/lucene/ --recursive --no-progress
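A note on the `Get Java Min Version and Lucene Revision from Lucene Repository` step above: the shell pipeline greps the first `minJavaVersion` line out of Lucene's `build.gradle` and keeps only the digits after the final underscore. A rough Java equivalent of that extraction, assuming Lucene keeps the `JavaVersion.VERSION_NN` convention; the class and method names here are illustrative only, not anything in the workflow:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class MinJavaVersionSketch {
    // Matches e.g. "def minJavaVersion = JavaVersion.VERSION_21" and captures "21",
    // mirroring the shell pipeline: grep -Eo '_[0-9]+$' | tr -d '_'
    private static final Pattern MIN_JAVA = Pattern.compile("minJavaVersion.*_(\\d+)\\s*$");

    static String extract(String buildGradleLine) {
        Matcher m = MIN_JAVA.matcher(buildGradleLine.trim());
        return m.find() ? m.group(1) : null;
    }

    public static void main(String[] args) {
        System.out.println(extract("def minJavaVersion = JavaVersion.VERSION_21")); // 21
    }
}
```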
diff --git a/.github/workflows/maintainer-approval.yml b/.github/workflows/maintainer-approval.yml
new file mode 100644
index 0000000000000..2f87afd372d90
--- /dev/null
+++ b/.github/workflows/maintainer-approval.yml
@@ -0,0 +1,33 @@
+name: Maintainers approval
+
+on:
+  pull_request_review:
+    types: [submitted]
+
+jobs:
+  maintainer-approved-check:
+    name: Minimum approval count
+    runs-on: ubuntu-latest
+    steps:
+      - id: find-maintainers
+        uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          result-encoding: string
+          script: |
+            // Get the collaborators - filtered to maintainer permissions
+            const maintainersResponse = await github.request('GET /repos/{owner}/{repo}/collaborators', {
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              permission: 'maintain',
+              affiliation: 'all',
+              per_page: 100
+            });
+
+            return maintainersResponse.data.map(item => item.login).join(', ');
+
+      - uses: peternied/required-approval@v1.2
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          min-required: 1
+          required-approvers-list: ${{ steps.find-maintainers.outputs.result }}
diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml
index cd75eb47946a4..800aacec98516 100644
--- a/.github/workflows/precommit.yml
+++ b/.github/workflows/precommit.yml
@@ -12,7 +12,7 @@ jobs:
    steps:
      - uses: actions/checkout@v4
      - name: Set up JDK ${{ matrix.java }}
-        uses: actions/setup-java@v3
+        uses: actions/setup-java@v4
        with:
          java-version: ${{ matrix.java }}
          distribution: temurin
diff --git a/.github/workflows/publish-maven-snapshots.yml b/.github/workflows/publish-maven-snapshots.yml
index 93bbfb8bbeab8..1b2db22c7c20b 100644
--- a/.github/workflows/publish-maven-snapshots.yml
+++ b/.github/workflows/publish-maven-snapshots.yml
@@ -20,7 +20,7 @@ jobs:
    steps:
      - uses: actions/checkout@v4
      - name: Set up JDK 17
-        uses: actions/setup-java@v3
+        uses: actions/setup-java@v4
        with:
          distribution: adopt
          java-version: 17
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 65c25e3e18716..211667e170800 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,15 +12,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107))
 - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664))
 - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618))
-- [Admission control] Add enhancements to FS stats to include read/write time, queue size and IO time ([#10541](https://github.com/opensearch-project/OpenSearch/pull/10541))
-- [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890))
-- [Remote cluster state] Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557))
-- [Remote cluster state] Upload global metadata in cluster state to remote store([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404))
-- [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535))
-- [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404))
 - [AdmissionControl] Added changes for AdmissionControl Interceptor and AdmissionControlService for RateLimiting ([#9286](https://github.com/opensearch-project/OpenSearch/pull/9286))
 - GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800))
-- [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853))
+- Allow passing list settings through environment variables (like [], ["a", "b", "c"], ...) ([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) (a parsing sketch follows this hunk)
+- [Admission Control] Integrate CPU AC with ResourceUsageCollector and add CPU AC stats to nodes/stats ([#10887](https://github.com/opensearch-project/OpenSearch/pull/10887))
+- Maintainer approval check ([#11378](https://github.com/opensearch-project/OpenSearch/pull/11378))
 ### Dependencies
 - Bump `log4j-core` from 2.18.0 to 2.19.0
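The `Allow passing list settings through environment variables` entry above ([#10625]) accepts JSON-style array values such as `["a", "b", "c"]`. A minimal parsing sketch, assuming Jackson (already a dependency of this repository) and a hypothetical helper name; this illustrates the idea, not the actual #10625 implementation:

```java
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.Collections;
import java.util.List;

public final class EnvListSettingSketch {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Hypothetical helper: decode a list-valued setting supplied via an env var.
    static List<String> parseListSetting(String raw) throws Exception {
        String trimmed = raw.trim();
        if (trimmed.isEmpty() || "[]".equals(trimmed)) {
            return Collections.emptyList();
        }
        // A JSON-style array such as ["a", "b", "c"] parses directly.
        if (trimmed.startsWith("[")) {
            return MAPPER.readValue(trimmed, new TypeReference<List<String>>() {});
        }
        // Fall back to treating the value as a single-element list.
        return Collections.singletonList(trimmed);
    }

    public static void main(String[] args) throws Exception {
        System.out.println(parseListSetting("[\"a\", \"b\", \"c\"]")); // [a, b, c]
        System.out.println(parseListSetting("[]"));                     // []
    }
}
```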
@@ -53,6 +49,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822))
 - Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147))
 - Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617))
+- Bump OpenTelemetry from 1.31.0 to 1.32.0 and OpenTelemetry Semconv from 1.21.0-alpha to 1.23.1-alpha ([#11305](https://github.com/opensearch-project/OpenSearch/pull/11305))
 ### Changed
 - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948))
@@ -88,35 +85,72 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827))
 - Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944))
 - Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993))
+- [BUG] Fix the thread context that is not properly cleared and messes up the traces ([#10873](https://github.com/opensearch-project/OpenSearch/pull/10873))
+- Handle canMatchSearchAfter for frozen context scenario ([#11249](https://github.com/opensearch-project/OpenSearch/pull/11249))
 ### Security
 ## [Unreleased 2.x]
 ### Added
+- [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890))
+- [Admission control] Add enhancements to FS stats to include read/write time, queue size and IO time ([#10541](https://github.com/opensearch-project/OpenSearch/pull/10541))
+- [Remote cluster state] Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557))
+- [Search Pipelines] Add request-scoped state shared between processors (and three new processors) ([#9405](https://github.com/opensearch-project/OpenSearch/pull/9405))
 - Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351))
 - [Remote Store] Add repository stats for remote store([#10567](https://github.com/opensearch-project/OpenSearch/pull/10567))
-- Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255))
-- Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([10352](https://github.com/opensearch-project/OpenSearch/pull/10352))
-- Update the indexRandom function to create more segments for concurrent search tests ([10247](https://github.com/opensearch-project/OpenSearch/pull/10247))
+- [Remote cluster state] Upload global metadata in cluster state to remote store ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404))
+- [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535))
+- [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404))
 - [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814))
+- Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255))
+- Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351))
 - Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670))
-- Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069))
+- [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853))
+- Update the indexRandom function to create more segments for concurrent search tests ([#10247](https://github.com/opensearch-project/OpenSearch/pull/10247))
+- Add support for query profiler with concurrent aggregation ([#9248](https://github.com/opensearch-project/OpenSearch/pull/9248))
+- Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([#10352](https://github.com/opensearch-project/OpenSearch/pull/10352))
+- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679))
+- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618))
 - [Streaming Indexing] Introduce new experimental server HTTP transport based on Netty 4 and Project Reactor (Reactor Netty) ([#9672](https://github.com/opensearch-project/OpenSearch/pull/9672))
+- Add back half_float BKD based sort query optimization ([#11024](https://github.com/opensearch-project/OpenSearch/pull/11024))
+- Request level coordinator slow logs ([#10650](https://github.com/opensearch-project/OpenSearch/pull/10650))
+- Add template snippets support for field and target_field in KV ingest processor ([#10040](https://github.com/opensearch-project/OpenSearch/pull/10040))
+- Allowing pipeline processors to access index mapping info by passing ingest service ref as part of the processor factory parameters ([#10307](https://github.com/opensearch-project/OpenSearch/pull/10307))
+- Make number of segment metadata files in remote segment store configurable ([#11329](https://github.com/opensearch-project/OpenSearch/pull/11329))
+- Allow changing number of replicas of searchable snapshot index ([#11317](https://github.com/opensearch-project/OpenSearch/pull/11317))
+- Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069))
+- [BWC and API enforcement] Introduce checks for enforcing the API restrictions ([#11175](https://github.com/opensearch-project/OpenSearch/pull/11175))
 - Create separate transport action for render search template action ([#11170](https://github.com/opensearch-project/OpenSearch/pull/11170))
 ### Dependencies
-- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298))
 - Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276))
+- Bump `commons-io:commons-io` from 2.13.0 to 2.15.1 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294), [#11001](https://github.com/opensearch-project/OpenSearch/pull/11001), [#11002](https://github.com/opensearch-project/OpenSearch/pull/11002), [#11446](https://github.com/opensearch-project/OpenSearch/pull/11446), [#11554](https://github.com/opensearch-project/OpenSearch/pull/11554))
+- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298))
 - Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295))
-- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297))
 - Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506))
 - Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508))
-- Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294))
 - Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639))
+- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297))
 - Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635))
 - Bump `com.squareup.okio:okio` from 3.5.0 to 3.6.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637))
-- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.21.1 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000))
+- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.22.0 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000), [#11270](https://github.com/opensearch-project/OpenSearch/pull/11270))
 - Bump `aws-actions/configure-aws-credentials` from 2 to 4 ([#10504](https://github.com/opensearch-project/OpenSearch/pull/10504))
+- Bump `stefanzweifel/git-auto-commit-action` from 4 to 5 ([#11171](https://github.com/opensearch-project/OpenSearch/pull/11171))
+- Bump `actions/github-script` from 6 to 7 ([#11271](https://github.com/opensearch-project/OpenSearch/pull/11271))
+- Bump `jackson` and `jackson_databind` from 2.15.2 to 2.16.0 ([#11273](https://github.com/opensearch-project/OpenSearch/pull/11273))
+- Bump `netty` from 4.1.100.Final to 4.1.101.Final ([#11294](https://github.com/opensearch-project/OpenSearch/pull/11294))
+- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.12 to 0.17.5 ([#10163](https://github.com/opensearch-project/OpenSearch/pull/10163))
+- Bump `com.squareup.okhttp3:okhttp` from 4.11.0 to 4.12.0 ([#10861](https://github.com/opensearch-project/OpenSearch/pull/10861))
+- Bump `org.apache.commons:commons-text` from 1.10.0 to 1.11.0 ([#11344](https://github.com/opensearch-project/OpenSearch/pull/11344))
+- Bump `reactor-netty-core` from 1.1.12 to 1.1.13 ([#11350](https://github.com/opensearch-project/OpenSearch/pull/11350))
+- Bump `com.gradle.enterprise` from 3.14.1 to 3.15.1 ([#11339](https://github.com/opensearch-project/OpenSearch/pull/11339))
+- Bump `actions/setup-java` from 3 to 4 ([#11447](https://github.com/opensearch-project/OpenSearch/pull/11447))
+- Bump `commons-net:commons-net` from 3.9.0 to 3.10.0 ([#11450](https://github.com/opensearch-project/OpenSearch/pull/11450))
+- Bump `org.apache.maven:maven-model` from 3.9.4 to 3.9.6 ([#11445](https://github.com/opensearch-project/OpenSearch/pull/11445))
+- Bump `org.apache.xmlbeans:xmlbeans` from 5.1.1 to 5.2.0 ([#11448](https://github.com/opensearch-project/OpenSearch/pull/11448))
+- Bump `logback-core` and `logback-classic` to 1.2.13 ([#11521](https://github.com/opensearch-project/OpenSearch/pull/11521))
+- Bumps `jetty` version from 9.4.52.v20230823 to 9.4.53.v20231009 ([#11539](https://github.com/opensearch-project/OpenSearch/pull/11539))
+- Bump `org.wiremock:wiremock-standalone` from 3.1.0 to 3.3.1 ([#11555](https://github.com/opensearch-project/OpenSearch/pull/11555))
 ### Changed
 - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840))
@@ -127,10 +161,15 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642))
 - Add telemetry tracer/metric enable flag and integ test. ([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395))
 - Add instrumentation for indexing in transport bulk action and transport shard bulk action. ([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273))
-- [BUG] Disable sort optimization for HALF_FLOAT ([#10999](https://github.com/opensearch-project/OpenSearch/pull/10999))
+- Disallow removing some metadata fields by remove ingest processor ([#10895](https://github.com/opensearch-project/OpenSearch/pull/10895))
+- Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023))
 - Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057))
+- Performance improvement for date histogram aggregations without sub-aggregations ([#11083](https://github.com/opensearch-project/OpenSearch/pull/11083))
 - Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087))
-- Use iterative approach to evaluate Regex.simpleMatch ([#11060](https://github.com/opensearch-project/OpenSearch/pull/11060))
+- Made leader/follower check timeout setting dynamic ([#10528](https://github.com/opensearch-project/OpenSearch/pull/10528))
+- Improve boolean parsing performance ([#11308](https://github.com/opensearch-project/OpenSearch/pull/11308))
+- Interpret byte array as primitive using VarHandles ([#11362](https://github.com/opensearch-project/OpenSearch/pull/11362)) (a sketch follows this file's diff)
+- Change error message when per shard document limit is breached ([#11312](https://github.com/opensearch-project/OpenSearch/pull/11312))
 ### Deprecated
@@ -144,7 +183,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496))
 - Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor ([#10737](https://github.com/opensearch-project/OpenSearch/pull/10737))
 - Fix SuggestSearch.testSkipDuplicates by forceing refresh when indexing its test documents ([#11068](https://github.com/opensearch-project/OpenSearch/pull/11068))
-- Adding version condition while adding geoshape doc values to the index, to ensure backward compatibility.([#11095](https://github.com/opensearch-project/OpenSearch/pull/11095))
+- Delegating CachingWeightWrapper#count to internal weight object ([#10543](https://github.com/opensearch-project/OpenSearch/pull/10543))
+- Fix per request latency last phase not tracked ([#10934](https://github.com/opensearch-project/OpenSearch/pull/10934))
+- Fix for stuck update action in a bulk with `retry_on_conflict` property ([#11152](https://github.com/opensearch-project/OpenSearch/issues/11152))
+- Fix the issue with DefaultSpanScope restoring wrong span in the TracerContextStorage upon detach ([#11316](https://github.com/opensearch-project/OpenSearch/issues/11316))
+- Remove shadowJar from `lang-painless` module publication ([#11369](https://github.com/opensearch-project/OpenSearch/issues/11369))
+- Fix remote shards balancer and remove unused variables ([#11167](https://github.com/opensearch-project/OpenSearch/pull/11167))
+- Fix bug where replication lag grows post primary relocation ([#11238](https://github.com/opensearch-project/OpenSearch/pull/11238))
+- Fix template setting override for replication type ([#11417](https://github.com/opensearch-project/OpenSearch/pull/11417))
 ### Security
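For the `Interpret byte array as primitive using VarHandles` entry above ([#11362]): the JDK mechanism involved is `MethodHandles.byteArrayViewVarHandle`, which reads a multi-byte primitive out of a `byte[]` in a single bounds-checked access instead of assembling it byte by byte. A minimal sketch using only standard `java.lang.invoke` APIs; the class and constant names are illustrative, not the PR's code:

```java
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;
import java.nio.ByteOrder;

public final class ByteArrayViewSketch {
    // A VarHandle view that reads/writes little-endian longs inside a byte[].
    private static final VarHandle LONG_LE =
        MethodHandles.byteArrayViewVarHandle(long[].class, ByteOrder.LITTLE_ENDIAN);

    static long readLong(byte[] buf, int offset) {
        // One VarHandle access replaces eight shift-and-or operations.
        return (long) LONG_LE.get(buf, offset);
    }

    public static void main(String[] args) {
        byte[] buf = new byte[] { 1, 0, 0, 0, 0, 0, 0, 0 };
        System.out.println(readLong(buf, 0)); // 1
    }
}
```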
diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle
index 02aa9319cc583..6b4634c7e791c 100644
--- a/benchmarks/build.gradle
+++ b/benchmarks/build.gradle
@@ -40,7 +40,7 @@ application {
 }
 base {
-  archivesBaseName = 'opensearch-benchmarks'
+  archivesName = 'opensearch-benchmarks'
 }
 test.enabled = false
diff --git a/benchmarks/src/main/java/org/opensearch/common/ArrayRoundingBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java
similarity index 89%
rename from benchmarks/src/main/java/org/opensearch/common/ArrayRoundingBenchmark.java
rename to benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java
index 64c0a9e1d7aa6..4e07af452968b 100644
--- a/benchmarks/src/main/java/org/opensearch/common/ArrayRoundingBenchmark.java
+++ b/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java
@@ -6,7 +6,7 @@
  * compatible open source license.
  */
-package org.opensearch.common;
+package org.opensearch.common.round;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
@@ -27,13 +27,13 @@
 @Warmup(iterations = 3, time = 1)
 @Measurement(iterations = 1, time = 1)
 @BenchmarkMode(Mode.Throughput)
-public class ArrayRoundingBenchmark {
+public class RoundableBenchmark {
     @Benchmark
-    public void round(Blackhole bh, Options opts) {
-        Rounding.Prepared rounding = opts.supplier.get();
+    public void floor(Blackhole bh, Options opts) {
+        Roundable roundable = opts.supplier.get();
         for (long key : opts.queries) {
-            bh.consume(rounding.round(key));
+            bh.consume(roundable.floor(key));
         }
     }
@@ -90,7 +90,7 @@ public static class Options {
         public String distribution;
         public long[] queries;
-        public Supplier<Rounding.Prepared> supplier;
+        public Supplier<Roundable> supplier;
         @Setup
         public void setup() {
@@ -130,10 +130,10 @@ public void setup() {
             switch (type) {
                 case "binary":
-                    supplier = () -> new Rounding.BinarySearchArrayRounding(values, size, null);
+                    supplier = () -> new BinarySearcher(values, size);
                     break;
                 case "linear":
-                    supplier = () -> new Rounding.BidirectionalLinearSearchArrayRounding(values, size, null);
+                    supplier = () -> new BidirectionalLinearSearcher(values, size);
                     break;
                 default:
                     throw new IllegalArgumentException("invalid type: " + type);
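The renamed benchmark above exercises a `Roundable` whose `floor(key)` returns the greatest element of a sorted array that is less than or equal to the key. A minimal sketch of the binary-search variant, assuming the `(values, size)` constructor contract visible in the benchmark; it illustrates the technique, not the actual `BinarySearcher` class:

```java
import java.util.Arrays;

final class BinarySearchFloorSketch {
    private final long[] values;
    private final int size;

    BinarySearchFloorSketch(long[] values, int size) {
        this.values = values; // assumed sorted ascending
        this.size = size;
    }

    long floor(long key) {
        int idx = Arrays.binarySearch(values, 0, size, key);
        // On a miss, binarySearch returns -(insertionPoint) - 1; the floor is the
        // element just before the insertion point.
        if (idx < 0) {
            idx = -idx - 2;
        }
        assert idx >= 0 : "key is smaller than the smallest rounding value";
        return values[idx];
    }
}
```

The companion `BidirectionalLinearSearcher` in the benchmark presumably trades the O(log n) search for a scan that is faster on the small arrays typical of date-histogram rounding points.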
diff --git a/build.gradle b/build.gradle
index 9d62e942a4431..b1cd1d532bfeb 100644
--- a/build.gradle
+++ b/build.gradle
@@ -375,7 +375,7 @@ allprojects {
       } else {
         // Link to non-shadowed dependant projects
         project.javadoc.dependsOn "${upstreamProject.path}:javadoc"
-        String externalLinkName = upstreamProject.base.archivesBaseName
+        String externalLinkName = upstreamProject.base.archivesName
         String artifactPath = dep.group.replaceAll('\\.', '/') + '/' + externalLinkName.replaceAll('\\.', '/') + '/' + dep.version
         String projectRelativePath = project.relativePath(upstreamProject.buildDir)
         project.javadoc.options.linksOffline artifactsHost + "/javadoc/" + artifactPath, "${projectRelativePath}/docs/javadoc/"
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index 6d3e0f018657e..34e94eac1826f 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -109,15 +109,15 @@ dependencies {
   api 'com.netflix.nebula:nebula-publishing-plugin:20.3.0'
   api 'com.netflix.nebula:gradle-info-plugin:12.1.6'
   api 'org.apache.rat:apache-rat:0.15'
-  api 'commons-io:commons-io:2.13.0'
+  api 'commons-io:commons-io:2.15.1'
   api "net.java.dev.jna:jna:5.13.0"
   api 'com.github.johnrengelman:shadow:8.1.1'
   api 'org.jdom:jdom2:2.0.6.1'
   api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}"
   api 'de.thetaphi:forbiddenapis:3.6'
-  api 'com.avast.gradle:gradle-docker-compose-plugin:0.16.12'
+  api 'com.avast.gradle:gradle-docker-compose-plugin:0.17.5'
   api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}"
-  api 'org.apache.maven:maven-model:3.9.4'
+  api 'org.apache.maven:maven-model:3.9.6'
   api 'com.networknt:json-schema-validator:1.0.86'
   api 'org.jruby.jcodings:jcodings:1.0.58'
   api 'org.jruby.joni:joni:2.2.1'
@@ -128,7 +128,7 @@ dependencies {
   testFixturesApi "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}"
   testFixturesApi gradleApi()
   testFixturesApi gradleTestKit()
-  testImplementation 'org.wiremock:wiremock-standalone:3.1.0'
+  testImplementation 'org.wiremock:wiremock-standalone:3.3.1'
   testImplementation "org.mockito:mockito-core:${props.getProperty('mockito')}"
   integTestImplementation('org.spockframework:spock-core:2.3-groovy-3.0') {
     exclude module: "groovy"
diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy
index 556763333d279..13f5f8724c6f2 100644
--- a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy
@@ -89,7 +89,7 @@ class PluginBuildPlugin implements Plugin<Project> {
         String name = extension1.name
         BasePluginExtension base = project.getExtensions().findByType(BasePluginExtension.class)
-        base.archivesBaseName = name
+        base.archivesName = name
         project.description = extension1.description
         if (extension1.name == null) {
@@ -155,7 +155,7 @@ class PluginBuildPlugin implements Plugin<Project> {
         // Only configure publishing if applied externally
         if (extension.hasClientJar) {
           project.pluginManager.apply('com.netflix.nebula.maven-base-publish')
-          // Only change Jar tasks, we don't want a -client zip so we can't change archivesBaseName
+          // Only change Jar tasks, we don't want a -client zip so we can't change archivesName
           project.tasks.withType(Jar) {
             archiveBaseName = archiveBaseName.get() + "-client"
           }
@@ -163,7 +163,7 @@ class PluginBuildPlugin implements Plugin<Project> {
           project.publishing.publications.nebula(MavenPublication).artifactId(extension.name + "-client")
           final BasePluginExtension base = project.getExtensions().findByType(BasePluginExtension.class)
           project.tasks.withType(GenerateMavenPom.class).configureEach { GenerateMavenPom generatePOMTask ->
-            generatePOMTask.destination = "${project.buildDir}/distributions/${base.archivesBaseName}-client-${project.versions.opensearch}.pom"
+            generatePOMTask.destination = "${project.buildDir}/distributions/${base.archivesName}-client-${project.versions.opensearch}.pom"
           }
         } else {
           if (project.plugins.hasPlugin(MavenPublishPlugin)) {
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java
index 97e923c366598..7ec21bba18c64 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java
@@ -77,7 +77,7 @@ public void apply(Project project) {
     }
     private static String getArchivesBaseName(Project project) {
-        return project.getExtensions().getByType(BasePluginExtension.class).getArchivesBaseName();
+        return project.getExtensions().getByType(BasePluginExtension.class).getArchivesName().get();
     }
     /**Configuration generation of maven poms.
      */
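Background for the `archivesBaseName` → `archivesName` changes throughout this diff: Gradle replaces the eager `getArchivesBaseName()` with the lazy, `Property`-based `getArchivesName()`, which is why the call site above now appends `.get()`. A small sketch of the pattern against the real Gradle API (the task class itself is hypothetical, not part of this PR):

```java
import org.gradle.api.DefaultTask;
import org.gradle.api.plugins.BasePluginExtension;
import org.gradle.api.provider.Property;
import org.gradle.api.tasks.TaskAction;

public abstract class PrintArchivesNameTask extends DefaultTask {
    @TaskAction
    void print() {
        BasePluginExtension base = getProject().getExtensions().getByType(BasePluginExtension.class);
        Property<String> archivesName = base.getArchivesName();
        // .get() resolves the lazy provider at execution time, mirroring
        // getArchivesName().get() in PublishPlugin above.
        System.out.println(archivesName.get());
    }
}
```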
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java
index 3aba941875115..c9e18426966f9 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java
@@ -170,6 +170,7 @@ public void execute(Task task) {
                 .findFirst();
             composeExtension.getExecutable().set(dockerCompose.isPresent() ? dockerCompose.get() : "/usr/bin/docker");
+            composeExtension.getUseDockerComposeV2().set(false);
             tasks.named("composeUp").configure(t -> {
                 // Avoid running docker-compose tasks in parallel in CI due to some issues on certain Linux distributions
diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle
index dca2bce94ea6d..f24b61ef0d165 100644
--- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle
+++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle
@@ -17,7 +17,7 @@ repositories {
 }
 dependencies {
-  implementation "org.apache.logging.log4j:log4j-core:2.21.1"
+  implementation "org.apache.logging.log4j:log4j-core:2.22.0"
 }
 ["0.0.1", "0.0.2"].forEach { v ->
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index f19437979c852..74d655cfb1045 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -7,8 +7,8 @@ bundled_jdk = 21.0.1+12
 # optional dependencies
 spatial4j = 0.7
 jts = 1.15.0
-jackson = 2.15.2
-jackson_databind = 2.15.2
+jackson = 2.16.0
+jackson_databind = 2.16.0
 snakeyaml = 2.1
 icu4j = 70.1
 supercsv = 2.4.0
@@ -26,11 +26,11 @@ jakarta_annotation = 1.3.5
 # when updating the JNA version, also update the version in buildSrc/build.gradle
 jna = 5.13.0
-netty = 4.1.100.Final
+netty = 4.1.101.Final
 joda = 2.12.2
 # project reactor
-reactor_netty = 1.1.12
+reactor_netty = 1.1.13
 reactor = 3.5.11
 # client dependencies
@@ -70,5 +70,5 @@ jzlib = 1.1.3
 resteasy = 6.2.4.Final
 # opentelemetry dependencies
-opentelemetry = 1.31.0
-opentelemetrysemconv = 1.21.0-alpha
+opentelemetry = 1.32.0
+opentelemetrysemconv = 1.23.1-alpha
diff --git a/client/benchmark/build.gradle b/client/benchmark/build.gradle
index 6fd5262f0ab4f..c1af5fa92e35c 100644
--- a/client/benchmark/build.gradle
+++ b/client/benchmark/build.gradle
@@ -33,7 +33,7 @@ apply plugin: 'application'
 base {
   group = 'org.opensearch.client'
-  archivesBaseName = 'client-benchmarks'
+  archivesName = 'client-benchmarks'
 }
 // Not published so no need to assemble
diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle
index 770cb3f78ca47..fdc93d8037ce6 100644
--- a/client/rest-high-level/build.gradle
+++ b/client/rest-high-level/build.gradle
@@ -39,7 +39,7 @@ apply plugin: 'opensearch.rest-resources'
 base {
   group = 'org.opensearch.client'
-  archivesBaseName = 'opensearch-rest-high-level-client'
+  archivesName = 'opensearch-rest-high-level-client'
 }
 restResources {
diff --git a/client/rest/build.gradle b/client/rest/build.gradle
index 2c437c909fb03..ff3c322c5ccf7 100644
--- a/client/rest/build.gradle
+++ b/client/rest/build.gradle
@@ -40,7 +40,7 @@ java {
 base {
   group = 'org.opensearch.client'
-  archivesBaseName = 'opensearch-rest-client'
+  archivesName = 'opensearch-rest-client'
 }
 dependencies {
diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle
index f645b2dbbc933..4b50a996d1f9f 100644
--- a/client/sniffer/build.gradle
+++ b/client/sniffer/build.gradle
@@ -37,7 +37,7 @@ java {
 base {
   group = 'org.opensearch.client'
-  archivesBaseName = 'opensearch-rest-client-sniffer'
+  archivesName = 'opensearch-rest-client-sniffer'
 }
 dependencies {
diff --git a/client/sniffer/licenses/jackson-core-2.15.2.jar.sha1 b/client/sniffer/licenses/jackson-core-2.15.2.jar.sha1
deleted file mode 100644
index ec6781b968eed..0000000000000
--- a/client/sniffer/licenses/jackson-core-2.15.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a6fe1836469a69b3ff66037c324d75fc66ef137c
\ No newline at end of file
diff --git a/client/sniffer/licenses/jackson-core-2.16.0.jar.sha1 b/client/sniffer/licenses/jackson-core-2.16.0.jar.sha1
new file mode 100644
index 0000000000000..c2b70fb4ae202
--- /dev/null
+++ b/client/sniffer/licenses/jackson-core-2.16.0.jar.sha1
@@ -0,0 +1 @@
+899e5cf01be55fbf094ad72b2edb0c5df99111ee
\ No newline at end of file
diff --git a/distribution/archives/darwin-arm64-tar/build.gradle b/distribution/archives/darwin-arm64-tar/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/darwin-arm64-tar/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/archives/darwin-tar/build.gradle b/distribution/archives/darwin-tar/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/darwin-tar/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/archives/freebsd-tar/build.gradle b/distribution/archives/freebsd-tar/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/freebsd-tar/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/archives/integ-test-zip/build.gradle b/distribution/archives/integ-test-zip/build.gradle
index 9418223b0a44d..ffaea5e8ca771 100644
--- a/distribution/archives/integ-test-zip/build.gradle
+++ b/distribution/archives/integ-test-zip/build.gradle
@@ -38,7 +38,7 @@ apply plugin: 'com.netflix.nebula.maven-publish'
 base {
   group = "org.opensearch.distribution.integ-test-zip"
-  archivesBaseName = "opensearch"
+  archivesName = "opensearch"
 }
 integTest {
diff --git a/distribution/archives/jre-linux-tar/build.gradle b/distribution/archives/jre-linux-tar/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/jre-linux-tar/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/archives/linux-arm64-tar/build.gradle b/distribution/archives/linux-arm64-tar/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/linux-arm64-tar/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/archives/linux-ppc64le-tar/build.gradle b/distribution/archives/linux-ppc64le-tar/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/linux-ppc64le-tar/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/archives/linux-s390x-tar/build.gradle b/distribution/archives/linux-s390x-tar/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/linux-s390x-tar/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/archives/linux-tar/build.gradle b/distribution/archives/linux-tar/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/linux-tar/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/archives/no-jdk-darwin-arm64-tar/build.gradle b/distribution/archives/no-jdk-darwin-arm64-tar/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/no-jdk-darwin-arm64-tar/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/archives/no-jdk-darwin-tar/build.gradle b/distribution/archives/no-jdk-darwin-tar/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/no-jdk-darwin-tar/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/archives/no-jdk-freebsd-tar/build.gradle b/distribution/archives/no-jdk-freebsd-tar/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/no-jdk-freebsd-tar/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/archives/no-jdk-linux-arm64-tar/build.gradle b/distribution/archives/no-jdk-linux-arm64-tar/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/no-jdk-linux-arm64-tar/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/archives/no-jdk-linux-ppc64le-tar/build.gradle b/distribution/archives/no-jdk-linux-ppc64le-tar/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/no-jdk-linux-ppc64le-tar/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/archives/no-jdk-linux-tar/build.gradle b/distribution/archives/no-jdk-linux-tar/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/no-jdk-linux-tar/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/archives/no-jdk-windows-zip/build.gradle b/distribution/archives/no-jdk-windows-zip/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/no-jdk-windows-zip/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/archives/windows-zip/build.gradle b/distribution/archives/windows-zip/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/archives/windows-zip/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/docker/docker-arm64-build-context/build.gradle b/distribution/docker/docker-arm64-build-context/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/docker/docker-arm64-build-context/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/docker/docker-arm64-export/build.gradle b/distribution/docker/docker-arm64-export/build.gradle
index 3506c4e39c234..62f3dc68b0c8e 100644
--- a/distribution/docker/docker-arm64-export/build.gradle
+++ b/distribution/docker/docker-arm64-export/build.gradle
@@ -11,3 +11,5 @@
 // This file is intentionally blank. All configuration of the
 // export is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/docker/docker-export/build.gradle b/distribution/docker/docker-export/build.gradle
index 3506c4e39c234..62f3dc68b0c8e 100644
--- a/distribution/docker/docker-export/build.gradle
+++ b/distribution/docker/docker-export/build.gradle
@@ -11,3 +11,5 @@
 // This file is intentionally blank. All configuration of the
 // export is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/docker/docker-ppc64le-export/build.gradle b/distribution/docker/docker-ppc64le-export/build.gradle
index 820a0cdf69dfc..ae7def32c4d6c 100644
--- a/distribution/docker/docker-ppc64le-export/build.gradle
+++ b/distribution/docker/docker-ppc64le-export/build.gradle
@@ -10,3 +10,5 @@
 // This file is intentionally blank. All configuration of the
 // export is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/docker/docker-s390x-export/build.gradle b/distribution/docker/docker-s390x-export/build.gradle
index 3506c4e39c234..62f3dc68b0c8e 100644
--- a/distribution/docker/docker-s390x-export/build.gradle
+++ b/distribution/docker/docker-s390x-export/build.gradle
@@ -11,3 +11,5 @@
 // This file is intentionally blank. All configuration of the
 // export is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/docker/src/docker/config/log4j2.properties b/distribution/docker/src/docker/config/log4j2.properties
index 761478a9fdc6e..8edd6a7354a16 100644
--- a/distribution/docker/src/docker/config/log4j2.properties
+++ b/distribution/docker/src/docker/config/log4j2.properties
@@ -34,6 +34,16 @@ logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
 logger.deprecation.appenderRef.header_warning.ref = header_warning
 logger.deprecation.additivity = false
+appender.search_request_slowlog_json_appender.type = Console
+appender.search_request_slowlog_json_appender.name = search_request_slowlog_json_appender
+appender.search_request_slowlog_json_appender.layout.type = OpenSearchJsonLayout
+appender.search_request_slowlog_json_appender.layout.type_name = search_request_slowlog
+
+logger.search_request_slowlog_logger.name = cluster.search.request.slowlog
+logger.search_request_slowlog_logger.level = trace
+logger.search_request_slowlog_logger.appenderRef.search_request_slowlog_json_appender.ref = search_request_slowlog_json_appender
+logger.search_request_slowlog_logger.additivity = false
+
 appender.index_search_slowlog_rolling.type = Console
 appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
 appender.index_search_slowlog_rolling.layout.type = OpenSearchJsonLayout
diff --git a/distribution/packages/arm64-deb/build.gradle b/distribution/packages/arm64-deb/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/packages/arm64-deb/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/packages/arm64-no-jdk-deb/build.gradle b/distribution/packages/arm64-no-jdk-deb/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/packages/arm64-no-jdk-deb/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/packages/arm64-no-jdk-rpm/build.gradle b/distribution/packages/arm64-no-jdk-rpm/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/packages/arm64-no-jdk-rpm/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/packages/arm64-rpm/build.gradle b/distribution/packages/arm64-rpm/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/packages/arm64-rpm/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/packages/deb/build.gradle b/distribution/packages/deb/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/packages/deb/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/packages/no-jdk-arm64-deb/build.gradle b/distribution/packages/no-jdk-arm64-deb/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/packages/no-jdk-arm64-deb/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/packages/no-jdk-arm64-rpm/build.gradle b/distribution/packages/no-jdk-arm64-rpm/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/packages/no-jdk-arm64-rpm/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/packages/no-jdk-deb/build.gradle b/distribution/packages/no-jdk-deb/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/packages/no-jdk-deb/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/packages/no-jdk-rpm/build.gradle b/distribution/packages/no-jdk-rpm/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/packages/no-jdk-rpm/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
diff --git a/distribution/packages/rpm/build.gradle b/distribution/packages/rpm/build.gradle
new file mode 100644
index 0000000000000..385d5ff27433e
--- /dev/null
+++ b/distribution/packages/rpm/build.gradle
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+// This file is intentionally blank. All configuration of the
+// distribution is done in the parent project.
+
+// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory
+ +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index bb27aaf2e22e6..d040afae82e53 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -113,6 +113,47 @@ logger.deprecation.appenderRef.deprecation_rolling_old.ref = deprecation_rolling logger.deprecation.appenderRef.header_warning.ref = header_warning logger.deprecation.additivity = false +######## Search Request Slowlog JSON #################### +appender.search_request_slowlog_json_appender.type = RollingFile +appender.search_request_slowlog_json_appender.name = search_request_slowlog_json_appender +appender.search_request_slowlog_json_appender.fileName = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs\ + .cluster_name}_index_search_slowlog.json +appender.search_request_slowlog_json_appender.filePermissions = rw-r----- +appender.search_request_slowlog_json_appender.layout.type = OpenSearchJsonLayout +appender.search_request_slowlog_json_appender.layout.type_name = search_request_slowlog +appender.search_request_slowlog_json_appender.layout.opensearchmessagefields=message,took,took_millis,phase_took,total_hits,search_type,shards,source,id + +appender.search_request_slowlog_json_appender.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs\ + .cluster_name}_index_search_slowlog-%i.json.gz +appender.search_request_slowlog_json_appender.policies.type = Policies +appender.search_request_slowlog_json_appender.policies.size.type = SizeBasedTriggeringPolicy +appender.search_request_slowlog_json_appender.policies.size.size = 1GB +appender.search_request_slowlog_json_appender.strategy.type = DefaultRolloverStrategy +appender.search_request_slowlog_json_appender.strategy.max = 4 +################################################# +######## Search Request Slowlog Log File - old style pattern #### +appender.search_request_slowlog_log_appender.type = RollingFile +appender.search_request_slowlog_log_appender.name = search_request_slowlog_log_appender +appender.search_request_slowlog_log_appender.fileName = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}\ + _index_search_slowlog.log +appender.search_request_slowlog_log_appender.filePermissions = rw-r----- +appender.search_request_slowlog_log_appender.layout.type = PatternLayout +appender.search_request_slowlog_log_appender.layout.pattern = [%d{ISO8601}][%-5p][%c{1.}] [%node_name]%marker %m%n + +appender.search_request_slowlog_log_appender.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}\ + _index_search_slowlog-%i.log.gz +appender.search_request_slowlog_log_appender.policies.type = Policies +appender.search_request_slowlog_log_appender.policies.size.type = SizeBasedTriggeringPolicy +appender.search_request_slowlog_log_appender.policies.size.size = 1GB +appender.search_request_slowlog_log_appender.strategy.type = DefaultRolloverStrategy +appender.search_request_slowlog_log_appender.strategy.max = 4 +################################################# +logger.search_request_slowlog_logger.name = cluster.search.request.slowlog +logger.search_request_slowlog_logger.level = trace +logger.search_request_slowlog_logger.appenderRef.search_request_slowlog_json_appender.ref = search_request_slowlog_json_appender 
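+# route the same slowlog logger to the old-style pattern appender declared above as well: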
+logger.search_request_slowlog_logger.appenderRef.search_request_slowlog_log_appender.ref = search_request_slowlog_log_appender +logger.search_request_slowlog_logger.additivity = false + ######## Search slowlog JSON #################### appender.index_search_slowlog_rolling.type = RollingFile appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling diff --git a/distribution/tools/launchers/build.gradle b/distribution/tools/launchers/build.gradle index e75267f7c4a74..aee205a24dea3 100644 --- a/distribution/tools/launchers/build.gradle +++ b/distribution/tools/launchers/build.gradle @@ -39,7 +39,7 @@ dependencies { } base { - archivesBaseName = 'opensearch-launchers' + archivesName = 'opensearch-launchers' } tasks.withType(CheckForbiddenApis).configureEach { diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index b61a00aba04bc..f40fb1c4b0a9f 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -31,7 +31,7 @@ apply plugin: 'opensearch.build' base { - archivesBaseName = 'opensearch-plugin-cli' + archivesName = 'opensearch-plugin-cli' } dependencies { diff --git a/distribution/tools/upgrade-cli/build.gradle b/distribution/tools/upgrade-cli/build.gradle index 99824463f14f8..92c043132c021 100644 --- a/distribution/tools/upgrade-cli/build.gradle +++ b/distribution/tools/upgrade-cli/build.gradle @@ -10,7 +10,7 @@ apply plugin: 'opensearch.build' base { - archivesBaseName = 'opensearch-upgrade-cli' + archivesName = 'opensearch-upgrade-cli' } dependencies { diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.15.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.0.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..79ed9e0c63fc8 --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.0.jar.sha1 @@ -0,0 +1 @@ +dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.15.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.0.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..da00d281934b1 --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.0.jar.sha1 @@ -0,0 +1 @@ +3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/docs/build.gradle b/docs/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/docs/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source 
license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 7f93135c49b76..d64cd4917707c 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index adfb521550eb9..f1d76d80bbfa3 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.4-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=f2b9ed0faf8472cbe469255ae6c86eddb77076c75191741b4a462f33128dd419 +distributionSha256Sum=c16d517b50dd28b3f5838f0e844b7520b8f1eb610f2f29de7e4e04a1b7c9c79b diff --git a/libs/common/build.gradle b/libs/common/build.gradle index 973fe30d09842..4f89b81636420 100644 --- a/libs/common/build.gradle +++ b/libs/common/build.gradle @@ -14,7 +14,7 @@ import org.opensearch.gradle.info.BuildParams apply plugin: 'opensearch.publish' base { - archivesBaseName = 'opensearch-common' + archivesName = 'opensearch-common' } dependencies { diff --git a/libs/common/src/main/java/org/opensearch/common/Booleans.java b/libs/common/src/main/java/org/opensearch/common/Booleans.java index 2ca061820b2eb..ab7ad37e92612 100644 --- a/libs/common/src/main/java/org/opensearch/common/Booleans.java +++ b/libs/common/src/main/java/org/opensearch/common/Booleans.java @@ -45,30 +45,72 @@ private Booleans() { /** * Parses a char[] representation of a boolean value to boolean. * - * @return true iff the sequence of chars is "true", false iff the sequence of chars is "false" or the - * provided default value iff either text is null or length == 0. + * @return true iff the sequence of chars is "true", false iff the sequence of + * chars is "false" or the provided default value iff either text is null or length == 0. * @throws IllegalArgumentException if the string cannot be parsed to boolean. */ public static boolean parseBoolean(char[] text, int offset, int length, boolean defaultValue) { - if (text == null || length == 0) { + if (text == null) { return defaultValue; - } else { - return parseBoolean(new String(text, offset, length)); } + + switch (length) { + case 0: + return defaultValue; + case 1: + case 2: + case 3: + default: + break; + case 4: + if (text[offset] == 't' && text[offset + 1] == 'r' && text[offset + 2] == 'u' && text[offset + 3] == 'e') { + return true; + } + break; + case 5: + if (text[offset] == 'f' + && text[offset + 1] == 'a' + && text[offset + 2] == 'l' + && text[offset + 3] == 's' + && text[offset + 4] == 'e') { + return false; + } + break; + } + + throw new IllegalArgumentException( + "Failed to parse value [" + new String(text, offset, length) + "] as only [true] or [false] are allowed." + ); } /** - * returns true iff the sequence of chars is one of "true","false". + * Returns true iff the sequence of chars is one of "true", "false". 
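+ * For example, {@code isBoolean("true".toCharArray(), 0, 4)} returns {@code true}; the comparison is case-sensitive, so {@code isBoolean("TRUE".toCharArray(), 0, 4)} returns {@code false}.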
* * @param text sequence to check * @param offset offset to start * @param length length to check */ public static boolean isBoolean(char[] text, int offset, int length) { - if (text == null || length == 0) { + if (text == null) { return false; } - return isBoolean(new String(text, offset, length)); + + switch (length) { + case 0: + case 1: + case 2: + case 3: + default: + return false; + case 4: + return text[offset] == 't' && text[offset + 1] == 'r' && text[offset + 2] == 'u' && text[offset + 3] == 'e'; + case 5: + return text[offset] == 'f' + && text[offset + 1] == 'a' + && text[offset + 2] == 'l' + && text[offset + 3] == 's' + && text[offset + 4] == 'e'; + } } public static boolean isBoolean(String value) { @@ -91,63 +133,45 @@ public static boolean parseBoolean(String value) { throw new IllegalArgumentException("Failed to parse value [" + value + "] as only [true] or [false] are allowed."); } - private static boolean hasText(CharSequence str) { - if (str == null || str.length() == 0) { - return false; - } - int strLen = str.length(); - for (int i = 0; i < strLen; i++) { - if (!Character.isWhitespace(str.charAt(i))) { - return true; - } - } - return false; - } - /** + * Parses a string representation of a boolean value to boolean. + * Note the subtle difference between this and {@link #parseBoolean(char[], int, int, boolean)}; this returns the + * default value even when the value is non-zero length containing all whitespaces (possibly overlooked, but + * preserving this behavior for compatibility reasons). Use {@link #parseBooleanStrict(String, boolean)} instead. * * @param value text to parse. - * @param defaultValue The default value to return if the provided value is null. + * @param defaultValue The default value to return if the provided value is null or blank. * @return see {@link #parseBoolean(String)} */ + @Deprecated public static boolean parseBoolean(String value, boolean defaultValue) { - if (hasText(value)) { - return parseBoolean(value); - } - return defaultValue; - } - - public static Boolean parseBoolean(String value, Boolean defaultValue) { - if (hasText(value)) { - return parseBoolean(value); + if (value == null || value.isBlank()) { + return defaultValue; } - return defaultValue; + return parseBoolean(value); } - /** - * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}. - * - * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, Boolean)} instead. - */ @Deprecated - public static Boolean parseBooleanLenient(String value, Boolean defaultValue) { - if (value == null) { // only for the null case we do that here! + public static Boolean parseBoolean(String value, Boolean defaultValue) { + if (value == null || value.isBlank()) { return defaultValue; } - return parseBooleanLenient(value, false); + return parseBoolean(value); } /** - * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}. + * Parses a string representation of a boolean value to boolean. + * Analogous to {@link #parseBoolean(char[], int, int, boolean)}. * - * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, boolean)} instead. + * @return true iff the sequence of chars is "true", false iff the sequence of + * chars is "false", or the provided default value iff either text is null or length == 0. + * @throws IllegalArgumentException if the string cannot be parsed to boolean. 
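+ * For example, {@code parseBooleanStrict("", true)} returns the default {@code true}, while {@code parseBooleanStrict(" ", true)} throws, unlike the deprecated {@link #parseBoolean(String, boolean)} which silently returns the default for blank input.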
*/ - @Deprecated - public static boolean parseBooleanLenient(String value, boolean defaultValue) { - if (value == null) { + public static boolean parseBooleanStrict(String value, boolean defaultValue) { + if (value == null || value.length() == 0) { return defaultValue; } - return !(value.equals("false") || value.equals("0") || value.equals("off") || value.equals("no")); + return parseBoolean(value); } /** @@ -163,71 +187,4 @@ public static boolean isFalse(String value) { public static boolean isTrue(String value) { return "true".equals(value); } - - /** - * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}. - * - * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(char[], int, int, boolean)} instead - */ - @Deprecated - public static boolean parseBooleanLenient(char[] text, int offset, int length, boolean defaultValue) { - if (text == null || length == 0) { - return defaultValue; - } - if (length == 1) { - return text[offset] != '0'; - } - if (length == 2) { - return !(text[offset] == 'n' && text[offset + 1] == 'o'); - } - if (length == 3) { - return !(text[offset] == 'o' && text[offset + 1] == 'f' && text[offset + 2] == 'f'); - } - if (length == 5) { - return !(text[offset] == 'f' - && text[offset + 1] == 'a' - && text[offset + 2] == 'l' - && text[offset + 3] == 's' - && text[offset + 4] == 'e'); - } - return true; - } - - /** - * returns true if the a sequence of chars is one of "true","false","on","off","yes","no","0","1" - * - * @param text sequence to check - * @param offset offset to start - * @param length length to check - * - * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #isBoolean(char[], int, int)} instead. - */ - @Deprecated - public static boolean isBooleanLenient(char[] text, int offset, int length) { - if (text == null || length == 0) { - return false; - } - if (length == 1) { - return text[offset] == '0' || text[offset] == '1'; - } - if (length == 2) { - return (text[offset] == 'n' && text[offset + 1] == 'o') || (text[offset] == 'o' && text[offset + 1] == 'n'); - } - if (length == 3) { - return (text[offset] == 'o' && text[offset + 1] == 'f' && text[offset + 2] == 'f') - || (text[offset] == 'y' && text[offset + 1] == 'e' && text[offset + 2] == 's'); - } - if (length == 4) { - return (text[offset] == 't' && text[offset + 1] == 'r' && text[offset + 2] == 'u' && text[offset + 3] == 'e'); - } - if (length == 5) { - return (text[offset] == 'f' - && text[offset + 1] == 'a' - && text[offset + 2] == 'l' - && text[offset + 3] == 's' - && text[offset + 4] == 'e'); - } - return false; - } - } diff --git a/libs/common/src/main/java/org/opensearch/common/Explicit.java b/libs/common/src/main/java/org/opensearch/common/Explicit.java index e060baf6f187e..da44c6fd4dcef 100644 --- a/libs/common/src/main/java/org/opensearch/common/Explicit.java +++ b/libs/common/src/main/java/org/opensearch/common/Explicit.java @@ -32,6 +32,8 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import java.util.Objects; /** @@ -43,8 +45,9 @@ * field mapping settings it is preferable to preserve an explicit * choice rather than a choice made only made implicitly by defaults. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Explicit { private final T value; diff --git a/libs/common/src/main/java/org/opensearch/common/Nullable.java b/libs/common/src/main/java/org/opensearch/common/Nullable.java index c663ef863ed48..70db2a3755eba 100644 --- a/libs/common/src/main/java/org/opensearch/common/Nullable.java +++ b/libs/common/src/main/java/org/opensearch/common/Nullable.java @@ -32,6 +32,8 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import javax.annotation.CheckForNull; import javax.annotation.meta.TypeQualifierNickname; @@ -53,5 +55,6 @@ @CheckForNull @Retention(RetentionPolicy.RUNTIME) @Target({ ElementType.PARAMETER, ElementType.FIELD, ElementType.METHOD }) +@PublicApi(since = "1.0.0") public @interface Nullable { } diff --git a/libs/common/src/main/java/org/opensearch/common/SuppressForbidden.java b/libs/common/src/main/java/org/opensearch/common/SuppressForbidden.java index 1f1b28bcf6759..c479d7bd98e8a 100644 --- a/libs/common/src/main/java/org/opensearch/common/SuppressForbidden.java +++ b/libs/common/src/main/java/org/opensearch/common/SuppressForbidden.java @@ -31,6 +31,8 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -43,6 +45,7 @@ */ @Retention(RetentionPolicy.CLASS) @Target({ ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE }) +@PublicApi(since = "1.0.0") public @interface SuppressForbidden { String reason(); } diff --git a/libs/common/src/main/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessor.java b/libs/common/src/main/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessor.java new file mode 100644 index 0000000000000..1864aec4aa951 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessor.java @@ -0,0 +1,369 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.DeprecatedApi; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.annotation.PublicApi; + +import javax.annotation.processing.AbstractProcessor; +import javax.annotation.processing.RoundEnvironment; +import javax.annotation.processing.SupportedAnnotationTypes; +import javax.lang.model.AnnotatedConstruct; +import javax.lang.model.SourceVersion; +import javax.lang.model.element.AnnotationMirror; +import javax.lang.model.element.Element; +import javax.lang.model.element.ElementKind; +import javax.lang.model.element.ExecutableElement; +import javax.lang.model.element.Modifier; +import javax.lang.model.element.PackageElement; +import javax.lang.model.element.TypeElement; +import javax.lang.model.element.TypeParameterElement; +import javax.lang.model.element.VariableElement; +import javax.lang.model.type.ArrayType; +import javax.lang.model.type.DeclaredType; +import javax.lang.model.type.ReferenceType; +import javax.lang.model.type.TypeMirror; +import javax.lang.model.type.TypeVariable; +import javax.lang.model.type.WildcardType; +import javax.tools.Diagnostic.Kind; + +import java.util.HashSet; +import java.util.Set; + +/** + * The annotation processor for API related annotations: {@link DeprecatedApi}, {@link ExperimentalApi}, + * {@link InternalApi} and {@link PublicApi}. + *

+ * <p>
+ * The checks are built on top of the following rules:
+ * <ul>
+ * <li>introspect each type annotated with {@link PublicApi}, {@link DeprecatedApi} or {@link ExperimentalApi},
+ * filtering out package-private declarations</li>
+ * <li>make sure those leak only {@link PublicApi}, {@link DeprecatedApi} or {@link ExperimentalApi} types as well (exceptions,
+ * method return values, method arguments, method generic type arguments, class generic type arguments, annotations)</li>
+ * <li>recursively follow the type introspection chains to enforce the rules down the line</li>
+ * </ul>
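+ * <p>
+ * By default, failing checks are reported as compilation errors. Passing the {@code continueOnFailingChecks}
+ * processor option (for example, {@code -AcontinueOnFailingChecks} on the javac command line) demotes the
+ * reports to notes, as implemented below.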
+ */ +@InternalApi +@SupportedAnnotationTypes("org.opensearch.common.annotation.*") +public class ApiAnnotationProcessor extends AbstractProcessor { + private static final String OPTION_CONTINUE_ON_FAILING_CHECKS = "continueOnFailingChecks"; + private static final String OPENSEARCH_PACKAGE = "org.opensearch"; + + private final Set reported = new HashSet<>(); + private final Set processed = new HashSet<>(); + private Kind reportFailureAs = Kind.ERROR; + + @Override + public SourceVersion getSupportedSourceVersion() { + return SourceVersion.latest(); + } + + @Override + public Set getSupportedOptions() { + return Set.of(OPTION_CONTINUE_ON_FAILING_CHECKS); + } + + @Override + public boolean process(Set annotations, RoundEnvironment round) { + processingEnv.getMessager().printMessage(Kind.NOTE, "Processing OpenSearch Api annotations"); + + if (processingEnv.getOptions().containsKey(OPTION_CONTINUE_ON_FAILING_CHECKS) == true) { + reportFailureAs = Kind.NOTE; + } + + final Set elements = round.getElementsAnnotatedWithAny( + Set.of(PublicApi.class, ExperimentalApi.class, DeprecatedApi.class) + ); + + for (var element : elements) { + if (!checkPackage(element)) { + continue; + } + + // Skip all not-public elements + checkPublicVisibility(null, element); + + if (element instanceof TypeElement) { + process((TypeElement) element); + } + } + + return false; + } + + /** + * Check top level executable element + * @param executable top level executable element + * @param enclosing enclosing element + */ + private void process(ExecutableElement executable, Element enclosing) { + if (!inspectable(executable)) { + return; + } + + // The executable element should not be internal (unless constructor for injectable core component) + checkNotInternal(enclosing, executable); + + // Check this elements annotations + for (final AnnotationMirror annotation : executable.getAnnotationMirrors()) { + final Element element = annotation.getAnnotationType().asElement(); + if (inspectable(element)) { + checkNotInternal(executable.getEnclosingElement(), element); + checkPublic(executable.getEnclosingElement(), element); + } + } + + // Process method return types + final TypeMirror returnType = executable.getReturnType(); + if (returnType instanceof ReferenceType) { + process(executable, (ReferenceType) returnType); + } + + // Process method thrown types + for (final TypeMirror thrownType : executable.getThrownTypes()) { + if (thrownType instanceof ReferenceType) { + process(executable, (ReferenceType) thrownType); + } + } + + // Process method type parameters + for (final TypeParameterElement typeParameter : executable.getTypeParameters()) { + for (final TypeMirror boundType : typeParameter.getBounds()) { + if (boundType instanceof ReferenceType) { + process(executable, (ReferenceType) boundType); + } + } + } + + // Process method arguments + for (final VariableElement parameter : executable.getParameters()) { + final TypeMirror parameterType = parameter.asType(); + if (parameterType instanceof ReferenceType) { + process(executable, (ReferenceType) parameterType); + } + } + } + + /** + * Check wildcard type bounds referred by an element + * @param executable element + * @param type wildcard type + */ + private void process(ExecutableElement executable, WildcardType type) { + if (type.getExtendsBound() instanceof ReferenceType) { + process(executable, (ReferenceType) type.getExtendsBound()); + } + + if (type.getSuperBound() instanceof ReferenceType) { + process(executable, (ReferenceType) type.getSuperBound()); + } + } + + 
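+ // Illustrative sketch of the violation shape this recursion reports (type names are hypothetical,
+ // mirroring the processor tests):
+ //
+ //   @PublicApi(since = "1.0.0")
+ //   public class PublicApiExample {
+ //       public NotAnnotated leak() { ... } // NotAnnotated carries no API annotation -> reported
+ //   }
+ //
+ // Return types, thrown types, type parameters and method arguments are all traversed the same way.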
/** + * Check reference type bounds referred by an executable element + * @param executable executable element + * @param ref reference type + */ + private void process(ExecutableElement executable, ReferenceType ref) { + // The element has been processed already + if (processed.add(ref) == false) { + return; + } + + if (ref instanceof DeclaredType) { + final DeclaredType declaredType = (DeclaredType) ref; + + final Element element = declaredType.asElement(); + if (inspectable(element)) { + checkNotInternal(executable.getEnclosingElement(), element); + checkPublic(executable.getEnclosingElement(), element); + } + + for (final TypeMirror type : declaredType.getTypeArguments()) { + if (type instanceof ReferenceType) { + process(executable, (ReferenceType) type); + } else if (type instanceof WildcardType) { + process(executable, (WildcardType) type); + } + } + } else if (ref instanceof ArrayType) { + final TypeMirror componentType = ((ArrayType) ref).getComponentType(); + if (componentType instanceof ReferenceType) { + process(executable, (ReferenceType) componentType); + } + } else if (ref instanceof TypeVariable) { + final TypeVariable typeVariable = (TypeVariable) ref; + if (typeVariable.getUpperBound() instanceof ReferenceType) { + process(executable, (ReferenceType) typeVariable.getUpperBound()); + } + if (typeVariable.getLowerBound() instanceof ReferenceType) { + process(executable, (ReferenceType) typeVariable.getLowerBound()); + } + } + + // Check this elements annotations + for (final AnnotationMirror annotation : ref.getAnnotationMirrors()) { + final Element element = annotation.getAnnotationType().asElement(); + if (inspectable(element)) { + checkNotInternal(executable.getEnclosingElement(), element); + checkPublic(executable.getEnclosingElement(), element); + } + } + } + + /** + * Check if a particular executable element should be inspected or not + * @param executable executable element to inspect + * @return {@code true} if a particular executable element should be inspected, {@code false} otherwise + */ + private boolean inspectable(ExecutableElement executable) { + // The constructors for public APIs could use non-public APIs when those are supposed to be only + // consumed (not instantiated) by external consumers. 
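+ // Hence constructors are skipped here, and only public, non-constructor executables are inspected further.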
+ return executable.getKind() != ElementKind.CONSTRUCTOR && executable.getModifiers().contains(Modifier.PUBLIC); + } + + /** + * Check if a particular element should be inspected or not + * @param element element to inspect + * @return {@code true} if a particular element should be inspected, {@code false} otherwise + */ + private boolean inspectable(Element element) { + final PackageElement pckg = processingEnv.getElementUtils().getPackageOf(element); + return pckg.getQualifiedName().toString().startsWith(OPENSEARCH_PACKAGE); + } + + /** + * Check if a particular element belongs to OpenSearch managed packages + * @param element element to inspect + * @return {@code true} if a particular element belongs to OpenSearch managed packages, {@code false} otherwise + */ + private boolean checkPackage(Element element) { + // The element was reported already + if (reported.contains(element)) { + return false; + } + + final PackageElement pckg = processingEnv.getElementUtils().getPackageOf(element); + final boolean belongsToOpenSearch = pckg.getQualifiedName().toString().startsWith(OPENSEARCH_PACKAGE); + + if (!belongsToOpenSearch) { + reported.add(element); + + processingEnv.getMessager() + .printMessage( + reportFailureAs, + "The type " + + element + + " is not residing in " + + OPENSEARCH_PACKAGE + + ".* package " + + "and should not be annotated as OpenSearch APIs." + ); + } + + return belongsToOpenSearch; + } + + /** + * Check the fields, methods, constructors, and member types that are directly + * declared in this class or interface. + * @param element class or interface + */ + private void process(Element element) { + // Check the fields, methods, constructors, and member types that are directly + // declared in this class or interface. + for (final Element enclosed : element.getEnclosedElements()) { + // Skip all not-public elements + if (!enclosed.getModifiers().contains(Modifier.PUBLIC)) { + continue; + } + + if (enclosed instanceof ExecutableElement) { + process((ExecutableElement) enclosed, element); + } + } + } + + /** + * Check if element is public and annotated with {@link PublicApi}, {@link DeprecatedApi} or {@link ExperimentalApi} + * @param referencedBy the referrer for the element + * @param element element to check + */ + private void checkPublic(@Nullable Element referencedBy, final Element element) { + // The element was reported already + if (reported.contains(element)) { + return; + } + + checkPublicVisibility(referencedBy, element); + + if (element.getAnnotation(PublicApi.class) == null + && element.getAnnotation(ExperimentalApi.class) == null + && element.getAnnotation(DeprecatedApi.class) == null) { + reported.add(element); + + processingEnv.getMessager() + .printMessage( + reportFailureAs, + "The element " + + element + + " is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi" + + ((referencedBy != null) ?
" (referenced by " + referencedBy + ") " : "") + ); + } + } + + /** + * Check if element has public visibility (following Java visibility rules) + * @param referencedBy the referrer for the element + * @param element element to check + */ + private void checkPublicVisibility(Element referencedBy, final Element element) { + if (!element.getModifiers().contains(Modifier.PUBLIC) && !element.getModifiers().contains(Modifier.PROTECTED)) { + reported.add(element); + + processingEnv.getMessager() + .printMessage( + reportFailureAs, + "The element " + + element + + " is part of the public APIs but does not have public or protected visibility" + + ((referencedBy != null) ? " (referenced by " + referencedBy + ") " : "") + ); + } + } + + /** + * Check if element is not annotated with {@link InternalApi} + * @param referencedBy the referrer for the element + * @param element element to check + */ + private void checkNotInternal(@Nullable Element referencedBy, final Element element) { + // The element was reported already + if (reported.contains(element)) { + return; + } + + if (element.getAnnotation(InternalApi.class) != null) { + reported.add(element); + + processingEnv.getMessager() + .printMessage( + reportFailureAs, + "The element " + + element + + " is part of the public APIs but is marked as @InternalApi" + + ((referencedBy != null) ? " (referenced by " + referencedBy + ") " : "") + ); + } + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/annotation/processor/package-info.java b/libs/common/src/main/java/org/opensearch/common/annotation/processor/package-info.java new file mode 100644 index 0000000000000..fa23e4a7addce --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/annotation/processor/package-info.java @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Classes related to OpenSearch API annotation processing + * + * @opensearch.internal + */ +@org.opensearch.common.annotation.InternalApi +package org.opensearch.common.annotation.processor; diff --git a/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java b/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java index 30bea6185febc..dfc4fefb9ee55 100644 --- a/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java +++ b/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java @@ -32,13 +32,16 @@ package org.opensearch.common.lease; +import org.opensearch.common.annotation.PublicApi; + import java.io.Closeable; /** * Specialization of {@link AutoCloseable} for calls that might not throw a checked exception. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Releasable extends Closeable { @Override diff --git a/libs/common/src/main/java/org/opensearch/common/round/BidirectionalLinearSearcher.java b/libs/common/src/main/java/org/opensearch/common/round/BidirectionalLinearSearcher.java new file mode 100644 index 0000000000000..5c3dcf2bd4708 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/BidirectionalLinearSearcher.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +/** + * It uses linear search on a sorted array of pre-computed round-down points. + * For small inputs (≤ 64 elements), this can be much faster than binary search as it avoids the penalty of + * branch mispredictions and pipeline stalls, and accesses memory sequentially. + * + *
<p>
+ * It uses "meet in the middle" linear search to avoid the worst case scenario when the desired element is present + * at either side of the array. This is helpful for time-series data where velocity increases over time, so more + * documents are likely to find a greater timestamp which is likely to be present on the right end of the array. + * + * @opensearch.internal + */ +@InternalApi +class BidirectionalLinearSearcher implements Roundable { + private final long[] ascending; + private final long[] descending; + + BidirectionalLinearSearcher(long[] values, int size) { + if (size <= 0) { + throw new IllegalArgumentException("at least one value must be present"); + } + + int len = (size + 1) >>> 1; // rounded-up to handle odd number of values + ascending = new long[len]; + descending = new long[len]; + + for (int i = 0; i < len; i++) { + ascending[i] = values[i]; + descending[i] = values[size - i - 1]; + } + } + + @Override + public long floor(long key) { + int i = 0; + for (; i < ascending.length; i++) { + if (descending[i] <= key) { + return descending[i]; + } + if (ascending[i] > key) { + assert i > 0 : "key must be greater than or equal to " + ascending[0]; + return ascending[i - 1]; + } + } + return ascending[i - 1]; + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/round/BinarySearcher.java b/libs/common/src/main/java/org/opensearch/common/round/BinarySearcher.java new file mode 100644 index 0000000000000..b9d76945115ed --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/BinarySearcher.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +import java.util.Arrays; + +/** + * It uses binary search on a sorted array of pre-computed round-down points. + * + * @opensearch.internal + */ +@InternalApi +class BinarySearcher implements Roundable { + private final long[] values; + private final int size; + + BinarySearcher(long[] values, int size) { + if (size <= 0) { + throw new IllegalArgumentException("at least one value must be present"); + } + + this.values = values; + this.size = size; + } + + @Override + public long floor(long key) { + int idx = Arrays.binarySearch(values, 0, size, key); + assert idx != -1 : "key must be greater than or equal to " + values[0]; + if (idx < 0) { + idx = -2 - idx; + } + return values[idx]; + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/round/Roundable.java b/libs/common/src/main/java/org/opensearch/common/round/Roundable.java new file mode 100644 index 0000000000000..ae6f9b787c1e9 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/Roundable.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +/** + * Interface to round-off values. + * + * @opensearch.internal + */ +@InternalApi +@FunctionalInterface +public interface Roundable { + /** + * Returns the greatest lower bound of the given key. + * In other words, it returns the largest value such that {@code value <= key}. 
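+ * For example, with the pre-computed round-down points {@code [10, 20, 30]}, {@code floor(25)} returns {@code 20}.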
+ * @param key to floor + * @return the floored value + */ + long floor(long key); +} diff --git a/libs/common/src/main/java/org/opensearch/common/round/RoundableFactory.java b/libs/common/src/main/java/org/opensearch/common/round/RoundableFactory.java new file mode 100644 index 0000000000000..b7422694c3013 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/RoundableFactory.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +/** + * Factory class to create and return the fastest implementation of {@link Roundable}. + * + * @opensearch.internal + */ +@InternalApi +public final class RoundableFactory { + /** + * The maximum limit up to which linear search is used, otherwise binary search is used. + * This is because linear search is much faster on small arrays. + * Benchmark results: PR #9727 + */ + private static final int LINEAR_SEARCH_MAX_SIZE = 64; + + private RoundableFactory() {} + + /** + * Creates and returns the fastest implementation of {@link Roundable}. + */ + public static Roundable create(long[] values, int size) { + if (size <= LINEAR_SEARCH_MAX_SIZE) { + return new BidirectionalLinearSearcher(values, size); + } else { + return new BinarySearcher(values, size); + } + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/package-info.java b/libs/common/src/main/java/org/opensearch/common/round/package-info.java similarity index 72% rename from libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/package-info.java rename to libs/common/src/main/java/org/opensearch/common/round/package-info.java index 9feb862a4e010..e79c4017de31b 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/package-info.java +++ b/libs/common/src/main/java/org/opensearch/common/round/package-info.java @@ -7,6 +7,6 @@ */ /** - * Contains No-op implementations + * Contains classes to round-off values. */ -package org.opensearch.telemetry.tracing.http; +package org.opensearch.common.round; diff --git a/libs/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor b/libs/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor new file mode 100644 index 0000000000000..c4e4dfed864f2 --- /dev/null +++ b/libs/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor @@ -0,0 +1,12 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. 
+# + +org.opensearch.common.annotation.processor.ApiAnnotationProcessor \ No newline at end of file diff --git a/server/src/test/java/org/opensearch/common/BooleansTests.java b/libs/common/src/test/java/org/opensearch/common/BooleansTests.java similarity index 58% rename from server/src/test/java/org/opensearch/common/BooleansTests.java rename to libs/common/src/test/java/org/opensearch/common/BooleansTests.java index 7e4a0ad8e456b..578ec742d126d 100644 --- a/server/src/test/java/org/opensearch/common/BooleansTests.java +++ b/libs/common/src/test/java/org/opensearch/common/BooleansTests.java @@ -34,10 +34,7 @@ import org.opensearch.test.OpenSearchTestCase; -import java.util.Locale; - import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; public class BooleansTests extends OpenSearchTestCase { private static final String[] NON_BOOLEANS = new String[] { @@ -81,8 +78,23 @@ public void testParseBooleanWithFallback() { assertFalse(Booleans.parseBoolean(null, Boolean.FALSE)); assertTrue(Booleans.parseBoolean(null, Boolean.TRUE)); + assertFalse(Booleans.parseBoolean("", false)); + assertTrue(Booleans.parseBoolean("", true)); + assertNull(Booleans.parseBoolean("", null)); + assertFalse(Booleans.parseBoolean("", Boolean.FALSE)); + assertTrue(Booleans.parseBoolean("", Boolean.TRUE)); + + assertFalse(Booleans.parseBoolean(" \t\n", false)); + assertTrue(Booleans.parseBoolean(" \t\n", true)); + assertNull(Booleans.parseBoolean(" \t\n", null)); + assertFalse(Booleans.parseBoolean(" \t\n", Boolean.FALSE)); + assertTrue(Booleans.parseBoolean(" \t\n", Boolean.TRUE)); + assertTrue(Booleans.parseBoolean("true", randomFrom(Boolean.TRUE, Boolean.FALSE, null))); assertFalse(Booleans.parseBoolean("false", randomFrom(Boolean.TRUE, Boolean.FALSE, null))); + + assertTrue(Booleans.parseBoolean(new char[0], 0, 0, true)); + assertFalse(Booleans.parseBoolean(new char[0], 0, 0, false)); } public void testParseNonBooleanWithFallback() { @@ -109,56 +121,12 @@ public void testParseNonBoolean() { } } - public void testIsBooleanLenient() { - String[] booleans = new String[] { "true", "false", "on", "off", "yes", "no", "0", "1" }; - String[] notBooleans = new String[] { "11", "00", "sdfsdfsf", "F", "T" }; - assertThat(Booleans.isBooleanLenient(null, 0, 1), is(false)); - - for (String b : booleans) { - String t = "prefix" + b + "suffix"; - assertTrue( - "failed to recognize [" + b + "] as boolean", - Booleans.isBooleanLenient(t.toCharArray(), "prefix".length(), b.length()) - ); - } - - for (String nb : notBooleans) { - String t = "prefix" + nb + "suffix"; - assertFalse("recognized [" + nb + "] as boolean", Booleans.isBooleanLenient(t.toCharArray(), "prefix".length(), nb.length())); - } - } - - public void testParseBooleanLenient() { - assertThat(Booleans.parseBooleanLenient(randomFrom("true", "on", "yes", "1"), randomBoolean()), is(true)); - assertThat(Booleans.parseBooleanLenient(randomFrom("false", "off", "no", "0"), randomBoolean()), is(false)); - assertThat(Booleans.parseBooleanLenient(randomFrom("true", "on", "yes").toUpperCase(Locale.ROOT), randomBoolean()), is(true)); - assertThat(Booleans.parseBooleanLenient(null, false), is(false)); - assertThat(Booleans.parseBooleanLenient(null, true), is(true)); - - assertThat( - Booleans.parseBooleanLenient(randomFrom("true", "on", "yes", "1"), randomFrom(Boolean.TRUE, Boolean.FALSE, null)), - is(true) - ); - assertThat( - Booleans.parseBooleanLenient(randomFrom("false", "off", "no", "0"), randomFrom(Boolean.TRUE, Boolean.FALSE, null)), - is(false) 
- ); - assertThat( - Booleans.parseBooleanLenient( - randomFrom("true", "on", "yes").toUpperCase(Locale.ROOT), - randomFrom(Boolean.TRUE, Boolean.FALSE, null) - ), - is(true) - ); - assertThat(Booleans.parseBooleanLenient(null, Boolean.FALSE), is(false)); - assertThat(Booleans.parseBooleanLenient(null, Boolean.TRUE), is(true)); - assertThat(Booleans.parseBooleanLenient(null, null), nullValue()); - - char[] chars = randomFrom("true", "on", "yes", "1").toCharArray(); - assertThat(Booleans.parseBooleanLenient(chars, 0, chars.length, randomBoolean()), is(true)); - chars = randomFrom("false", "off", "no", "0").toCharArray(); - assertThat(Booleans.parseBooleanLenient(chars, 0, chars.length, randomBoolean()), is(false)); - chars = randomFrom("true", "on", "yes").toUpperCase(Locale.ROOT).toCharArray(); - assertThat(Booleans.parseBooleanLenient(chars, 0, chars.length, randomBoolean()), is(true)); + public void testParseBooleanStrict() { + assertTrue(Booleans.parseBooleanStrict("true", false)); + assertFalse(Booleans.parseBooleanStrict("false", true)); + assertTrue(Booleans.parseBooleanStrict(null, true)); + assertFalse(Booleans.parseBooleanStrict("", false)); + expectThrows(IllegalArgumentException.class, () -> Booleans.parseBooleanStrict("foobar", false)); + expectThrows(IllegalArgumentException.class, () -> Booleans.parseBooleanStrict(" \t\n", false)); } } diff --git a/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java b/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java new file mode 100644 index 0000000000000..df04709458b29 --- /dev/null +++ b/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java @@ -0,0 +1,476 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.test.OpenSearchTestCase; + +import javax.tools.Diagnostic; + +import static org.opensearch.common.annotation.processor.CompilerSupport.HasDiagnostic.matching; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; + +@SuppressWarnings("deprecation") +public class ApiAnnotationProcessorTests extends OpenSearchTestCase implements CompilerSupport { + public void testPublicApiMethodArgumentNotAnnotated() { + final CompilerResult result = compile("PublicApiMethodArgumentNotAnnotated.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodArgumentNotAnnotated)" + ) + ) + ) + ); + } + + public void testPublicApiMethodArgumentNotAnnotatedGenerics() { + final CompilerResult result = compile("PublicApiMethodArgumentNotAnnotatedGenerics.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodArgumentNotAnnotatedGenerics)" + ) + ) + ) + ); + } + + public void testPublicApiMethodThrowsNotAnnotated() { + final CompilerResult result = compile("PublicApiMethodThrowsNotAnnotated.java", "PublicApiAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotatedException is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodThrowsNotAnnotated)" + ) + ) + ) + ); + } + + public void testPublicApiMethodArgumentNotAnnotatedPackagePrivate() { + final CompilerResult result = compile("PublicApiMethodArgumentNotAnnotatedPackagePrivate.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(4)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotatedPackagePrivate is part of the public APIs but does not have public or protected visibility " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodArgumentNotAnnotatedPackagePrivate)" + ) + ) + ) + ); + + assertThat( + 
failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotatedPackagePrivate is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodArgumentNotAnnotatedPackagePrivate)" + ) + ) + ) + ); + } + + public void testPublicApiMethodArgumentAnnotatedPackagePrivate() { + final CompilerResult result = compile("PublicApiMethodArgumentAnnotatedPackagePrivate.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.AnnotatedPackagePrivate is part of the public APIs but does not have public or protected visibility " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodArgumentAnnotatedPackagePrivate)" + ) + ) + ) + ); + } + + public void testPublicApiWithInternalApiMethod() { + final CompilerResult result = compile("PublicApiWithInternalApiMethod.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element method() is part of the public APIs but is marked as @InternalApi (referenced by org.opensearch.common.annotation.processor.PublicApiWithInternalApiMethod)" + ) + ) + ) + ); + } + + /** + * The constructor arguments have relaxed semantics at the moment: those could be not annotated or be annotated as {@link InternalApi} + */ + public void testPublicApiConstructorArgumentNotAnnotated() { + final CompilerResult result = compile("PublicApiConstructorArgumentNotAnnotated.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + /** + * The constructor arguments have relaxed semantics at the moment: those could be not annotated or be annotated as {@link InternalApi} + */ + public void testPublicApiConstructorArgumentAnnotatedInternalApi() { + final CompilerResult result = compile("PublicApiConstructorArgumentAnnotatedInternalApi.java", "InternalApiAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testPublicApiWithExperimentalApiMethod() { + final CompilerResult result = compile("PublicApiWithExperimentalApiMethod.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testPublicApiMethodReturnNotAnnotated() { + final CompilerResult result = compile("PublicApiMethodReturnNotAnnotated.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + 
hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnNotAnnotated)" + ) + ) + ) + ); + } + + public void testPublicApiMethodReturnNotAnnotatedGenerics() { + final CompilerResult result = compile("PublicApiMethodReturnNotAnnotatedGenerics.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnNotAnnotatedGenerics)" + ) + ) + ) + ); + } + + public void testPublicApiMethodReturnNotAnnotatedArray() { + final CompilerResult result = compile("PublicApiMethodReturnNotAnnotatedArray.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnNotAnnotatedArray)" + ) + ) + ) + ); + } + + public void testPublicApiMethodReturnNotAnnotatedBoundedGenerics() { + final CompilerResult result = compile("PublicApiMethodReturnNotAnnotatedBoundedGenerics.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnNotAnnotatedBoundedGenerics)" + ) + ) + ) + ); + } + + public void testPublicApiMethodReturnNotAnnotatedAnnotation() { + final CompilerResult result = compile( + "PublicApiMethodReturnNotAnnotatedAnnotation.java", + "PublicApiAnnotated.java", + "NotAnnotatedAnnotation.java" + ); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotatedAnnotation is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnNotAnnotatedAnnotation)" + ) + ) + ) + ); + } + + public void testPublicApiMethodReturnNotAnnotatedWildcardGenerics() { + final CompilerResult result = compile("PublicApiMethodReturnNotAnnotatedWildcardGenerics.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure 
failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testPublicApiWithPackagePrivateMethod() { + final CompilerResult result = compile("PublicApiWithPackagePrivateMethod.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testPublicApiMethodReturnSelf() { + final CompilerResult result = compile("PublicApiMethodReturnSelf.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testExperimentalApiMethodReturnSelf() { + final CompilerResult result = compile("ExperimentalApiMethodReturnSelf.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testDeprecatedApiMethodReturnSelf() { + final CompilerResult result = compile("DeprecatedApiMethodReturnSelf.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testPublicApiPackagePrivate() { + final CompilerResult result = compile("PublicApiPackagePrivate.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.PublicApiPackagePrivate is part of the public APIs but does not have public or protected visibility" + ) + ) + ) + ); + } + + public void testPublicApiMethodGenericsArgumentNotAnnotated() { + final CompilerResult result = compile("PublicApiMethodGenericsArgumentNotAnnotated.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodGenericsArgumentNotAnnotated)" + ) + ) + ) + ); + } + + public void testPublicApiMethodReturnAnnotatedArray() { + final CompilerResult result = compile("PublicApiMethodReturnAnnotatedArray.java", "PublicApiAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testPublicApiMethodGenericsArgumentAnnotated() { + final CompilerResult result = compile("PublicApiMethodGenericsArgumentAnnotated.java", "PublicApiAnnotated.java"); + assertThat(result, 
instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testPublicApiAnnotatedNotOpensearch() { + final CompilerResult result = compileWithPackage("org.acme", "PublicApiAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The type org.acme.PublicApiAnnotated is not residing in org.opensearch.* package and should not be annotated as OpenSearch APIs." + ) + ) + ) + ); + } + + public void testPublicApiMethodReturnAnnotatedGenerics() { + final CompilerResult result = compile( + "PublicApiMethodReturnAnnotatedGenerics.java", + "PublicApiAnnotated.java", + "NotAnnotatedAnnotation.java" + ); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotatedAnnotation is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnAnnotatedGenerics)" + ) + ) + ) + ); + } + + /** + * The type could expose protected inner types which are still considered to be a public API when used + */ + public void testPublicApiWithProtectedInterface() { + final CompilerResult result = compile("PublicApiWithProtectedInterface.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } +} diff --git a/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java b/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java new file mode 100644 index 0000000000000..dcf8dd7945012 --- /dev/null +++ b/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.common.annotation.processor;
+
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.TypeSafeMatcher;
+
+import javax.tools.Diagnostic;
+import javax.tools.DiagnosticCollector;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaCompiler.CompilationTask;
+import javax.tools.JavaFileObject;
+import javax.tools.JavaFileObject.Kind;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.StandardJavaFileManager;
+import javax.tools.ToolProvider;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.StringWriter;
+import java.io.UncheckedIOException;
+import java.net.URI;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Locale;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+interface CompilerSupport {
+    default CompilerResult compile(String name, String... names) {
+        return compileWithPackage(ApiAnnotationProcessorTests.class.getPackageName(), name, names);
+    }
+
+    default CompilerResult compileWithPackage(String pck, String name, String... names) {
+        final JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
+        final DiagnosticCollector<JavaFileObject> collector = new DiagnosticCollector<>();
+
+        try (StringWriter out = new StringWriter()) {
+            final StandardJavaFileManager fileManager = compiler.getStandardFileManager(collector, null, null);
+            final List<JavaFileObject> files = Stream.concat(Stream.of(name), Arrays.stream(names))
+                .map(f -> asSource(pck, f))
+                .collect(Collectors.toList());
+
+            final CompilationTask task = compiler.getTask(out, fileManager, collector, null, null, files);
+            task.setProcessors(Collections.singleton(new ApiAnnotationProcessor()));
+
+            if (AccessController.doPrivileged((PrivilegedAction<Boolean>) () -> task.call())) {
+                return new Success();
+            } else {
+                return new Failure(collector.getDiagnostics());
+            }
+        } catch (final IOException ex) {
+            throw new UncheckedIOException(ex);
+        }
+    }
+
+    private static JavaFileObject asSource(String pkg, String name) {
+        final String resource = "/" + pkg.replaceAll("[.]", "/") + "/" + name;
+        final URL source = ApiAnnotationProcessorTests.class.getResource(resource);
+
+        return new SimpleJavaFileObject(URI.create(source.toExternalForm()), Kind.SOURCE) {
+            @Override
+            public CharSequence getCharContent(boolean ignoreEncodingErrors) throws IOException {
+                try (final InputStream in = ApiAnnotationProcessorTests.class.getResourceAsStream(resource)) {
+                    return new String(in.readAllBytes(), StandardCharsets.UTF_8);
+                }
+            }
+        };
+    }
+
+    class CompilerResult {}
+
+    class Success extends CompilerResult {
+
+    }
+
+    class Failure extends CompilerResult {
+        private final List<Diagnostic<? extends JavaFileObject>> diagnotics;
+
+        Failure(List<Diagnostic<? extends JavaFileObject>> diagnotics) {
+            this.diagnotics = diagnotics;
+        }
+
+        List<Diagnostic<? extends JavaFileObject>> diagnotics() {
+            return diagnotics;
+        }
+    }
+
+    class HasDiagnostic extends TypeSafeMatcher<Diagnostic<? extends JavaFileObject>> {
+        private final Diagnostic.Kind kind;
+        private final Matcher<String> matcher;
+
+        HasDiagnostic(final Diagnostic.Kind kind, final Matcher<String> matcher) {
+            this.kind = kind;
+            this.matcher = matcher;
+        }
+
+        @Override
+        public void describeTo(Description description) {
+            description.appendText("diagnostic with kind ").appendValue(kind).appendText(" ");
+
+            if (matcher != null) {
+                description.appendText(" and message ");
+                matcher.describeTo(description);
+            }
+        }
+
+        @Override
+        protected boolean matchesSafely(Diagnostic<? extends JavaFileObject> item) {
+            if (!kind.equals(item.getKind())) {
+                return false;
+            } else if (matcher != null) {
+                return matcher.matches(item.getMessage(Locale.ROOT));
+            } else {
+                return true;
+            }
+        }
+
+        public static HasDiagnostic matching(final Diagnostic.Kind kind, final Matcher<String> matcher) {
+            return new HasDiagnostic(kind, matcher);
+        }
+
+        public static HasDiagnostic matching(final Diagnostic.Kind kind) {
+            return new HasDiagnostic(kind, null);
+        }
+    }
+}
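For readers unfamiliar with the `javax.annotation.processing` plumbing that `CompilerSupport` drives: `task.setProcessors(...)` installs an annotation processor into the in-memory compilation, and any `Diagnostic.Kind.ERROR` the processor reports through its `Messager` lands in the `DiagnosticCollector` and flips the result from `Success` to `Failure`. A minimal, self-contained skeleton of such a processor is sketched below; `ExampleProcessor` is illustrative only and is not this PR's `ApiAnnotationProcessor`.

```java
import java.util.Set;
import javax.annotation.processing.AbstractProcessor;
import javax.annotation.processing.RoundEnvironment;
import javax.annotation.processing.SupportedAnnotationTypes;
import javax.annotation.processing.SupportedSourceVersion;
import javax.lang.model.SourceVersion;
import javax.lang.model.element.Element;
import javax.lang.model.element.TypeElement;
import javax.tools.Diagnostic;

// Generic skeleton (NOT the PR's ApiAnnotationProcessor): messages reported
// here are what the test harness above collects and asserts against.
@SupportedAnnotationTypes("org.opensearch.common.annotation.PublicApi")
@SupportedSourceVersion(SourceVersion.RELEASE_11)
public class ExampleProcessor extends AbstractProcessor {
    @Override
    public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) {
        for (TypeElement annotation : annotations) {
            for (Element element : roundEnv.getElementsAnnotatedWith(annotation)) {
                // A real processor would validate the element here and raise
                // Diagnostic.Kind.ERROR on violations; NOTE keeps this sketch harmless.
                processingEnv.getMessager().printMessage(Diagnostic.Kind.NOTE, "visiting " + element, element);
            }
        }
        return false; // do not claim the annotations; let other processors see them
    }
}
```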
diff --git a/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java b/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java
new file mode 100644
index 0000000000000..ae9f629c59024
--- /dev/null
+++ b/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java
@@ -0,0 +1,57 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.round;
+
+import org.opensearch.test.OpenSearchTestCase;
+
+public class RoundableTests extends OpenSearchTestCase {
+
+    public void testFloor() {
+        int size = randomIntBetween(1, 256);
+        long[] values = new long[size];
+        for (int i = 1; i < values.length; i++) {
+            values[i] = values[i - 1] + (randomNonNegativeLong() % 200) + 1;
+        }
+
+        Roundable[] impls = { new BinarySearcher(values, size), new BidirectionalLinearSearcher(values, size) };
+
+        for (int i = 0; i < 100000; i++) {
+            // Index of the expected round-down point.
+            int idx = randomIntBetween(0, size - 1);
+
+            // Value of the expected round-down point.
+            long expected = values[idx];
+
+            // Delta between the expected and the next round-down point.
+            long delta = (idx < size - 1) ? (values[idx + 1] - values[idx]) : 200;
+
+            // Adding a random delta between 0 (inclusive) and delta (exclusive) to the expected
+            // round-down point, which will still floor to the same value.
+            long key = expected + (randomNonNegativeLong() % delta);
+
+            for (Roundable roundable : impls) {
+                assertEquals(expected, roundable.floor(key));
+            }
+        }
+    }
+
+    public void testFailureCases() {
+        Throwable throwable;
+
+        throwable = assertThrows(IllegalArgumentException.class, () -> new BinarySearcher(new long[0], 0));
+        assertEquals("at least one value must be present", throwable.getMessage());
+        throwable = assertThrows(IllegalArgumentException.class, () -> new BidirectionalLinearSearcher(new long[0], 0));
+        assertEquals("at least one value must be present", throwable.getMessage());
+
+        throwable = assertThrows(AssertionError.class, () -> new BinarySearcher(new long[] { 100 }, 1).floor(50));
+        assertEquals("key must be greater than or equal to 100", throwable.getMessage());
+        throwable = assertThrows(AssertionError.class, () -> new BidirectionalLinearSearcher(new long[] { 100 }, 1).floor(50));
+        assertEquals("key must be greater than or equal to 100", throwable.getMessage());
+    }
+}
diff --git a/libs/common/src/test/resources/org/acme/PublicApiAnnotated.java b/libs/common/src/test/resources/org/acme/PublicApiAnnotated.java
new file mode 100644
index 0000000000000..bc16fd996e69d
--- /dev/null
+++ b/libs/common/src/test/resources/org/acme/PublicApiAnnotated.java
@@ -0,0 +1,16 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.acme; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiAnnotated { + +} diff --git a/libs/common/src/test/resources/org/opensearch/bootstrap/test.policy b/libs/common/src/test/resources/org/opensearch/bootstrap/test.policy new file mode 100644 index 0000000000000..e0a183b7eac88 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/bootstrap/test.policy @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +grant { + // allow to use JVM tooling (Java Compiler) in tests for annotation processing + permission java.io.FilePermission "${java.home}/lib/*", "read"; + permission java.io.FilePermission "${java.home}/lib/modules/*", "read"; + permission java.lang.RuntimePermission "accessSystemModules"; + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.RuntimePermission "accessClassInPackage.*"; +}; diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/DeprecatedApiMethodReturnSelf.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/DeprecatedApiMethodReturnSelf.java new file mode 100644 index 0000000000000..7c5b6f6ea2f51 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/DeprecatedApiMethodReturnSelf.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.DeprecatedApi; + +@DeprecatedApi(since = "1.0.0") +public class DeprecatedApiMethodReturnSelf { + public DeprecatedApiMethodReturnSelf method() { + return new DeprecatedApiMethodReturnSelf(); + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/ExperimentalApiAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/ExperimentalApiAnnotated.java new file mode 100644 index 0000000000000..5be07e22c811f --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/ExperimentalApiAnnotated.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.ExperimentalApi; + +@ExperimentalApi +public class ExperimentalApiAnnotated { + +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/ExperimentalApiMethodReturnSelf.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/ExperimentalApiMethodReturnSelf.java new file mode 100644 index 0000000000000..cde8f4f254faf --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/ExperimentalApiMethodReturnSelf.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.ExperimentalApi; + +@ExperimentalApi +public class ExperimentalApiMethodReturnSelf { + public ExperimentalApiMethodReturnSelf method() { + return new ExperimentalApiMethodReturnSelf(); + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/InternalApiAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/InternalApiAnnotated.java new file mode 100644 index 0000000000000..9996ba8b736aa --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/InternalApiAnnotated.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class InternalApiAnnotated { + +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotated.java new file mode 100644 index 0000000000000..ec16ce926ea86 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotated.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +public class NotAnnotated { + +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotatedAnnotation.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotatedAnnotation.java new file mode 100644 index 0000000000000..a3e9c4f576d92 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotatedAnnotation.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Target; + +@Target({ + ElementType.TYPE, + ElementType.TYPE_PARAMETER, + ElementType.TYPE_USE, + ElementType.PACKAGE, + ElementType.METHOD, + ElementType.CONSTRUCTOR, + ElementType.PARAMETER, + ElementType.FIELD, + ElementType.ANNOTATION_TYPE, + ElementType.MODULE }) +public @interface NotAnnotatedAnnotation { + +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotatedException.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotatedException.java new file mode 100644 index 0000000000000..0aadaf8f9bf31 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotatedException.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +public class NotAnnotatedException extends Exception { + private static final long serialVersionUID = 1L; +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiAnnotated.java new file mode 100644 index 0000000000000..b2a7f03cb2d31 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiAnnotated.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiAnnotated { + +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorArgumentAnnotatedInternalApi.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorArgumentAnnotatedInternalApi.java new file mode 100644 index 0000000000000..6bea2961a14e6 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorArgumentAnnotatedInternalApi.java @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiConstructorArgumentAnnotatedInternalApi { + /** + * The constructor arguments have relaxed semantics at the moment: those could be not annotated or be annotated as {@link InternalApi} + */ + public PublicApiConstructorArgumentAnnotatedInternalApi(InternalApiAnnotated arg) {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorArgumentNotAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorArgumentNotAnnotated.java new file mode 100644 index 0000000000000..6c7481d9978cd --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorArgumentNotAnnotated.java @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiConstructorArgumentNotAnnotated { + /** + * The constructor arguments have relaxed semantics at the moment: those could be not annotated or be annotated as {@link InternalApi} + */ + public PublicApiConstructorArgumentNotAnnotated(NotAnnotated arg) {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentAnnotatedPackagePrivate.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentAnnotatedPackagePrivate.java new file mode 100644 index 0000000000000..5dae56a7cd7d3 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentAnnotatedPackagePrivate.java @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodArgumentAnnotatedPackagePrivate { + public void method(AnnotatedPackagePrivate arg) {} +} + +// The public API exposes this class through public method argument, it should be public +@PublicApi(since = "1.0.0") +class AnnotatedPackagePrivate {} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotated.java new file mode 100644 index 0000000000000..ddfec939f79e8 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotated.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodArgumentNotAnnotated { + public void method(NotAnnotated arg) {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotatedGenerics.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotatedGenerics.java new file mode 100644 index 0000000000000..d32502831d299 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotatedGenerics.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.common.annotation.processor;
+
+import org.opensearch.common.annotation.PublicApi;
+
+import java.util.Collection;
+
+@PublicApi(since = "1.0.0")
+public class PublicApiMethodArgumentNotAnnotatedGenerics {
+    public void method(Collection<? super NotAnnotated> arg) {}
+}
diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotatedPackagePrivate.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotatedPackagePrivate.java
new file mode 100644
index 0000000000000..d4fb31b172ef2
--- /dev/null
+++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotatedPackagePrivate.java
@@ -0,0 +1,19 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.annotation.processor;
+
+import org.opensearch.common.annotation.PublicApi;
+
+@PublicApi(since = "1.0.0")
+public class PublicApiMethodArgumentNotAnnotatedPackagePrivate {
+    public void method(NotAnnotatedPackagePrivate arg) {}
+}
+
+// The public API exposes this class through public method argument, it should be annotated and be public
+class NotAnnotatedPackagePrivate {}
diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodGenericsArgumentAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodGenericsArgumentAnnotated.java
new file mode 100644
index 0000000000000..9715748cfa659
--- /dev/null
+++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodGenericsArgumentAnnotated.java
@@ -0,0 +1,16 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.annotation.processor;
+
+import org.opensearch.common.annotation.PublicApi;
+
+@PublicApi(since = "1.0.0")
+public class PublicApiMethodGenericsArgumentAnnotated {
+    public <T extends PublicApiAnnotated> void method(T arg) {}
+}
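The generics-oriented fixtures above and below exercise a subtle point: in a signature like `public <T extends PublicApiAnnotated> void method(T arg)`, it is the type-variable bound that leaks into the public API surface, so an annotation check has to look through `T` to its declared bounds. A hedged sketch of that lookup with the standard `javax.lang.model` API follows; `BoundsInspector` is a hypothetical helper, not code from this PR.

```java
import javax.lang.model.element.ExecutableElement;
import javax.lang.model.element.TypeParameterElement;
import javax.lang.model.type.TypeMirror;

// Hypothetical helper: walks a method's type parameters and surfaces each
// declared bound, which is what an API-surface check would then validate.
final class BoundsInspector {
    static void printBounds(ExecutableElement method) {
        for (TypeParameterElement typeParameter : method.getTypeParameters()) {
            for (TypeMirror bound : typeParameter.getBounds()) {
                System.out.println(typeParameter + " is bounded by " + bound);
            }
        }
    }
}
```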
diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodGenericsArgumentNotAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodGenericsArgumentNotAnnotated.java
new file mode 100644
index 0000000000000..f149c1f34b067
--- /dev/null
+++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodGenericsArgumentNotAnnotated.java
@@ -0,0 +1,16 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.annotation.processor;
+
+import org.opensearch.common.annotation.PublicApi;
+
+@PublicApi(since = "1.0.0")
+public class PublicApiMethodGenericsArgumentNotAnnotated {
+    public <T extends NotAnnotated> void method(T arg) {}
+}
diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnAnnotatedArray.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnAnnotatedArray.java
new file mode 100644
index 0000000000000..39b7e146fe1e7
--- /dev/null
+++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnAnnotatedArray.java
@@ -0,0 +1,18 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.annotation.processor;
+
+import org.opensearch.common.annotation.PublicApi;
+
+@PublicApi(since = "1.0.0")
+public class PublicApiMethodReturnAnnotatedArray {
+    public PublicApiAnnotated[] method() {
+        return new PublicApiAnnotated[0];
+    }
+}
diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnAnnotatedGenerics.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnAnnotatedGenerics.java
new file mode 100644
index 0000000000000..2171eccee2f31
--- /dev/null
+++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnAnnotatedGenerics.java
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.annotation.processor;
+
+import org.opensearch.common.annotation.PublicApi;
+
+import java.util.Collection;
+import java.util.Collections;
+
+import org.acme.PublicApiAnnotated;
+
+@PublicApi(since = "1.0.0")
+public class PublicApiMethodReturnAnnotatedGenerics {
+    public Collection<@NotAnnotatedAnnotation PublicApiAnnotated> method() {
+        return Collections.emptyList();
+    }
+}
diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotated.java
new file mode 100644
index 0000000000000..725d06072d0ea
--- /dev/null
+++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotated.java
@@ -0,0 +1,18 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodReturnNotAnnotated { + public NotAnnotated method() { + return new NotAnnotated(); + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedAnnotation.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedAnnotation.java new file mode 100644 index 0000000000000..b684e36a53da1 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedAnnotation.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodReturnNotAnnotatedAnnotation { + public @NotAnnotatedAnnotation PublicApiAnnotated method() { + return new PublicApiAnnotated(); + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedArray.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedArray.java new file mode 100644 index 0000000000000..e4c541dcea57f --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedArray.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodReturnNotAnnotatedArray { + public NotAnnotated[] method() { + return new NotAnnotated[0]; + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedBoundedGenerics.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedBoundedGenerics.java new file mode 100644 index 0000000000000..0646faf152610 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedBoundedGenerics.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.common.annotation.processor;
+
+import org.opensearch.common.annotation.PublicApi;
+
+import java.util.Collection;
+import java.util.Collections;
+
+@PublicApi(since = "1.0.0")
+public class PublicApiMethodReturnNotAnnotatedBoundedGenerics {
+    public Collection<? extends NotAnnotated> method() {
+        return Collections.emptyList();
+    }
+}
diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedGenerics.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedGenerics.java
new file mode 100644
index 0000000000000..2227883c707d0
--- /dev/null
+++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedGenerics.java
@@ -0,0 +1,21 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.annotation.processor;
+
+import org.opensearch.common.annotation.PublicApi;
+
+import java.util.Collection;
+import java.util.Collections;
+
+@PublicApi(since = "1.0.0")
+public class PublicApiMethodReturnNotAnnotatedGenerics {
+    public Collection<NotAnnotated> method() {
+        return Collections.emptyList();
+    }
+}
diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedWildcardGenerics.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedWildcardGenerics.java
new file mode 100644
index 0000000000000..f2818ebb23c4a
--- /dev/null
+++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedWildcardGenerics.java
@@ -0,0 +1,21 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.annotation.processor;
+
+import org.opensearch.common.annotation.PublicApi;
+
+import java.util.Collection;
+import java.util.Collections;
+
+@PublicApi(since = "1.0.0")
+public class PublicApiMethodReturnNotAnnotatedWildcardGenerics {
+    public Collection<?> method() {
+        return Collections.emptyList();
+    }
+}
diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnSelf.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnSelf.java
new file mode 100644
index 0000000000000..883471b23ae0f
--- /dev/null
+++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnSelf.java
@@ -0,0 +1,18 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodReturnSelf { + public PublicApiMethodReturnSelf method() { + return new PublicApiMethodReturnSelf(); + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodThrowsNotAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodThrowsNotAnnotated.java new file mode 100644 index 0000000000000..496b243276565 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodThrowsNotAnnotated.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodThrowsNotAnnotated { + public void method(PublicApiAnnotated arg) throws NotAnnotatedException {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiPackagePrivate.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiPackagePrivate.java new file mode 100644 index 0000000000000..88c20e7f4c8f1 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiPackagePrivate.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +class PublicApiPackagePrivate { + void method() {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithExperimentalApiMethod.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithExperimentalApiMethod.java new file mode 100644 index 0000000000000..faaaa1d9f4051 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithExperimentalApiMethod.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiWithExperimentalApiMethod { + @ExperimentalApi + public void method() {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithInternalApiMethod.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithInternalApiMethod.java new file mode 100644 index 0000000000000..5bfa3c9f3e008 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithInternalApiMethod.java @@ -0,0 +1,19 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiWithInternalApiMethod { + // The public API exposes internal API method, it should be public API + @InternalApi + public void method() {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithPackagePrivateMethod.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithPackagePrivateMethod.java new file mode 100644 index 0000000000000..1345467423530 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithPackagePrivateMethod.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiWithPackagePrivateMethod { + void method(NotAnnotated arg) {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithProtectedInterface.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithProtectedInterface.java new file mode 100644 index 0000000000000..222ae01fd15e6 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithProtectedInterface.java @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.common.annotation.processor;
+
+import org.opensearch.common.annotation.PublicApi;
+
+@PublicApi(since = "1.0.0")
+public class PublicApiWithProtectedInterface {
+    public void method(ProtectedInterface iface) {}
+
+    /**
+     * The type could expose protected inner types which are still considered to be a public API when used
+     */
+    @PublicApi(since = "1.0.0")
+    protected interface ProtectedInterface {}
+}
diff --git a/libs/core/build.gradle b/libs/core/build.gradle
index 46b6f4471731f..4850b5aea5c85 100644
--- a/libs/core/build.gradle
+++ b/libs/core/build.gradle
@@ -33,7 +33,7 @@ import org.opensearch.gradle.info.BuildParams
 apply plugin: 'opensearch.publish'
 
 base {
-  archivesBaseName = 'opensearch-core'
+  archivesName = 'opensearch-core'
 }
 
 // we want to keep the JDKs in our IDEs set to JDK 8 until minimum JDK is bumped to 11 so we do not include this source set in our IDEs
diff --git a/libs/core/licenses/jackson-core-2.15.2.jar.sha1 b/libs/core/licenses/jackson-core-2.15.2.jar.sha1
deleted file mode 100644
index ec6781b968eed..0000000000000
--- a/libs/core/licenses/jackson-core-2.15.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a6fe1836469a69b3ff66037c324d75fc66ef137c
\ No newline at end of file
diff --git a/libs/core/licenses/jackson-core-2.16.0.jar.sha1 b/libs/core/licenses/jackson-core-2.16.0.jar.sha1
new file mode 100644
index 0000000000000..c2b70fb4ae202
--- /dev/null
+++ b/libs/core/licenses/jackson-core-2.16.0.jar.sha1
@@ -0,0 +1 @@
+899e5cf01be55fbf094ad72b2edb0c5df99111ee
\ No newline at end of file
diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java
index 8d9ee73a02c1d..d94be3f25b53d 100644
--- a/libs/core/src/main/java/org/opensearch/Version.java
+++ b/libs/core/src/main/java/org/opensearch/Version.java
@@ -97,6 +97,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_2_10_1 = new Version(2100199, org.apache.lucene.util.Version.LUCENE_9_7_0);
     public static final Version V_2_11_0 = new Version(2110099, org.apache.lucene.util.Version.LUCENE_9_7_0);
     public static final Version V_2_11_1 = new Version(2110199, org.apache.lucene.util.Version.LUCENE_9_7_0);
+    public static final Version V_2_11_2 = new Version(2110299, org.apache.lucene.util.Version.LUCENE_9_7_0);
     public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_8_0);
     public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_8_0);
     public static final Version CURRENT = V_3_0_0;
diff --git a/libs/core/src/main/java/org/opensearch/core/action/ActionResponse.java b/libs/core/src/main/java/org/opensearch/core/action/ActionResponse.java
index 041d8b1bffb4a..7525bfb243aae 100644
--- a/libs/core/src/main/java/org/opensearch/core/action/ActionResponse.java
+++ b/libs/core/src/main/java/org/opensearch/core/action/ActionResponse.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.core.action;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.transport.TransportResponse;
 
@@ -42,6 +43,7 @@
  *
  * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public abstract class ActionResponse extends TransportResponse {
 
     public ActionResponse() {}
diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java
index 
8c1efcd00c24e..a2bf7e499dee8 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java @@ -53,11 +53,6 @@ public abstract class AbstractBytesReference implements BytesReference { private Integer hash = null; private static final int MAX_UTF16_LENGTH = Integer.MAX_VALUE >> 1; - @Override - public int getInt(int index) { - return (get(index) & 0xFF) << 24 | (get(index + 1) & 0xFF) << 16 | (get(index + 2) & 0xFF) << 8 | get(index + 3) & 0xFF; - } - @Override public int indexOf(byte marker, int from) { final int to = length(); diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesArray.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesArray.java index ae04ddcc19eee..d7a8414935143 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesArray.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesArray.java @@ -32,6 +32,7 @@ package org.opensearch.core.common.bytes; +import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.BytesRef; import org.opensearch.core.common.io.stream.StreamInput; @@ -83,6 +84,11 @@ public byte get(int index) { return bytes[offset + index]; } + @Override + public int getInt(int index) { + return (int) BitUtil.VH_BE_INT.get(bytes, offset + index); + } + @Override public int length() { return length; diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java index 9d24d3653397b..8cb65c9feb1ca 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java @@ -153,9 +153,11 @@ static BytesReference fromByteArray(ByteArray byteArray, int length) { byte get(int index); /** - * Returns the integer read from the 4 bytes (BE) starting at the given index. + * Returns the integer read from the 4 bytes (big endian) starting at the given index. */ - int getInt(int index); + default int getInt(int index) { + return ((get(index) & 0xFF) << 24) | ((get(index + 1) & 0xFF) << 16) | ((get(index + 2) & 0xFF) << 8) | (get(index + 3) & 0xFF); + } /** * Finds the index of the first occurrence of the given marker between within the given bounds. 
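The `getInt` rework above (and the `BytesStreamInput` changes that follow) replace per-byte shift-and-mask composition with Lucene's `BitUtil` VarHandles, which decode all four bytes in a single bounds-checked access. A small stand-alone sketch of the same idea using only the JDK; `BigEndianDecodeDemo` and its method names are illustrative, not part of the PR:

```java
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;
import java.nio.ByteOrder;

public class BigEndianDecodeDemo {
    // Mirrors Lucene's BitUtil.VH_BE_INT: a big-endian int view over a byte[].
    private static final VarHandle VH_BE_INT = MethodHandles.byteArrayViewVarHandle(int[].class, ByteOrder.BIG_ENDIAN);

    // The shift-and-mask fallback, as in the BytesReference#getInt default method above.
    static int getIntManual(byte[] b, int i) {
        return ((b[i] & 0xFF) << 24) | ((b[i + 1] & 0xFF) << 16) | ((b[i + 2] & 0xFF) << 8) | (b[i + 3] & 0xFF);
    }

    static int getIntVarHandle(byte[] b, int i) {
        return (int) VH_BE_INT.get(b, i);
    }

    public static void main(String[] args) {
        byte[] bytes = { 0x00, 0x12, 0x34, 0x56, 0x78 };
        // Both decode the 4 bytes starting at offset 1 as big-endian: 0x12345678.
        System.out.println(Integer.toHexString(getIntManual(bytes, 1)));    // 12345678
        System.out.println(Integer.toHexString(getIntVarHandle(bytes, 1))); // 12345678
    }
}
```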
diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java
index 30c84708728ef..cad43f817faaf 100644
--- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java
+++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java
@@ -8,6 +8,7 @@
 
 package org.opensearch.core.common.io.stream;
 
+import org.apache.lucene.util.BitUtil;
 import org.apache.lucene.util.BytesRef;
 
 import java.io.EOFException;
@@ -121,4 +122,33 @@ public int read() throws IOException {
         return bytes[pos++] & 0xFF;
     }
 
+    @Override
+    public short readShort() throws IOException {
+        if (available() < Short.BYTES) {
+            throw new EOFException();
+        }
+        short value = (short) BitUtil.VH_BE_SHORT.get(bytes, pos);
+        pos += Short.BYTES;
+        return value;
+    }
+
+    @Override
+    public int readInt() throws IOException {
+        if (available() < Integer.BYTES) {
+            throw new EOFException();
+        }
+        int value = (int) BitUtil.VH_BE_INT.get(bytes, pos);
+        pos += Integer.BYTES;
+        return value;
+    }
+
+    @Override
+    public long readLong() throws IOException {
+        if (available() < Long.BYTES) {
+            throw new EOFException();
+        }
+        long value = (long) BitUtil.VH_BE_LONG.get(bytes, pos);
+        pos += Long.BYTES;
+        return value;
+    }
 }
diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java
index af9df51655414..960f4bec5eeb5 100644
--- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java
+++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java
@@ -32,6 +32,8 @@
 
 package org.opensearch.core.common.io.stream;
 
+import org.opensearch.common.annotation.PublicApi;
+
 import java.io.IOException;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -41,8 +43,9 @@
  * across the wire" using OpenSearch's internal protocol. If the implementer also implements equals and hashCode then a copy made by
 * serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged.
 *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "2.8.0")
 public interface Writeable {
     /**
      * A WriteableRegistry registers {@link Writer} methods for writing data types over a
@@ -135,8 +138,11 @@ public static Class<?> getCustomClassFromInstance(final Object value) {
      * out.writeMapOfLists(someMap, StreamOutput::writeString, StreamOutput::writeString);
      * }
      *
+     *
+     * @opensearch.api
      */
     @FunctionalInterface
+    @PublicApi(since = "2.8.0")
     interface Writer<V> {
 
         /**
@@ -161,8 +167,11 @@ interface Writer<V> {
      * this.someMap = in.readMapOfLists(StreamInput::readString, StreamInput::readString);
      * }
      *
+     *
+     * @opensearch.api
      */
     @FunctionalInterface
+    @PublicApi(since = "2.8.0")
     interface Reader<V> {
 
         /**
diff --git a/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java b/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java
index e50f24417f8bc..f4d81c4ca4363 100644
--- a/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java
+++ b/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java
@@ -33,14 +33,16 @@
 
 package org.opensearch.core.common.util;
 
 import org.apache.lucene.util.BytesRef;
+import org.opensearch.common.annotation.PublicApi;
 
 import java.nio.ByteBuffer;
 
 /**
  * Abstraction of an array of byte values.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ByteArray extends BigArray { /** diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java index 3ce8b4953b9d6..992655efec8f0 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java @@ -32,6 +32,7 @@ package org.opensearch.core.indices.breaker; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -43,8 +44,9 @@ /** * Stats class encapsulating all of the different circuit breaker stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AllCircuitBreakerStats implements Writeable, ToXContentFragment { /** An array of all the circuit breaker stats */ diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java index ee9c94f432a36..dedeb0803271f 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java @@ -32,8 +32,7 @@ package org.opensearch.core.indices.breaker; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.core.common.breaker.CircuitBreaker; @@ -41,11 +40,10 @@ * Interface for Circuit Breaker services, which provide breakers to classes * that load field data. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class CircuitBreakerService extends AbstractLifecycleComponent { - private static final Logger logger = LogManager.getLogger(CircuitBreakerService.class); - protected CircuitBreakerService() {} /** diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java index 9207d3ea77227..ee71cf8d2ac0e 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java @@ -32,6 +32,7 @@ package org.opensearch.core.indices.breaker; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -45,8 +46,9 @@ /** * Class encapsulating stats about the {@link org.opensearch.core.common.breaker.CircuitBreaker} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CircuitBreakerStats implements Writeable, ToXContentObject { /** The name of the circuit breaker */ diff --git a/libs/core/src/main/java/org/opensearch/core/util/BytesRefUtils.java b/libs/core/src/main/java/org/opensearch/core/util/BytesRefUtils.java index 30c9f182fcae6..2aad068534b9d 100644 --- a/libs/core/src/main/java/org/opensearch/core/util/BytesRefUtils.java +++ b/libs/core/src/main/java/org/opensearch/core/util/BytesRefUtils.java @@ -32,6 +32,7 @@ package org.opensearch.core.util; +import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefArray; import org.apache.lucene.util.BytesRefBuilder; @@ -103,12 +104,12 @@ public static int sortAndDedup(final BytesRefArray bytes, final int[] indices) { return uniqueCount; } + /** + * Decodes a long value written as bytes in big endian order. + * @param bytes in big endian order + * @return long value + */ public static long bytesToLong(BytesRef bytes) { - int high = (bytes.bytes[bytes.offset + 0] << 24) | ((bytes.bytes[bytes.offset + 1] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 2] - & 0xff) << 8) | (bytes.bytes[bytes.offset + 3] & 0xff); - int low = (bytes.bytes[bytes.offset + 4] << 24) | ((bytes.bytes[bytes.offset + 5] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 6] - & 0xff) << 8) | (bytes.bytes[bytes.offset + 7] & 0xff); - return (((long) high) << 32) | (low & 0x0ffffffffL); + return (long) BitUtil.VH_BE_LONG.get(bytes.bytes, bytes.offset); } - } diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java index 365b36c755dd2..04d0bce27c04f 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java @@ -32,6 +32,7 @@ package org.opensearch.core.xcontent; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import java.io.IOException; @@ -83,8 +84,9 @@ * It's highly recommended to use the high level declare methods like {@link #declareString(BiConsumer, ParseField)} instead of * {@link #declareField} which can be used to implement exceptional parsing operations not covered by the high level methods. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ObjectParser<Value, Context> extends AbstractObjectParser<Value, Context> implements BiFunction<XContentParser, Context, Value>, ContextParser<Context, Value> { diff --git a/libs/core/src/test/java/org/opensearch/core/util/BytesRefUtilsTests.java b/libs/core/src/test/java/org/opensearch/core/util/BytesRefUtilsTests.java index 421263b883f2a..214f9292ae3a5 100644 --- a/libs/core/src/test/java/org/opensearch/core/util/BytesRefUtilsTests.java +++ b/libs/core/src/test/java/org/opensearch/core/util/BytesRefUtilsTests.java @@ -12,7 +12,6 @@ import org.apache.lucene.util.BytesRefArray; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.Counter; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.test.OpenSearchTestCase; import java.nio.ByteBuffer; @@ -90,8 +89,12 @@ public void testSortByteRefArray() { } public void testBytesToLong() { - final long value = randomLong(); - final BytesReference buffer = BytesReference.fromByteBuffer(ByteBuffer.allocate(8).putLong(value).flip()); - assertThat(BytesRefUtils.bytesToLong(buffer.toBytesRef()), equalTo(value)); + long value = randomLong(); + int paddingStart = randomIntBetween(0, 10); + int paddingEnd = randomIntBetween(0, 10); + byte[] bytes = new byte[paddingStart + Long.BYTES + paddingEnd]; + ByteBuffer.wrap(bytes).putLong(paddingStart, value); + BytesRef bytesRef = new BytesRef(bytes, paddingStart, Long.BYTES); + assertThat(BytesRefUtils.bytesToLong(bytesRef), equalTo(value)); } } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java index a5d515443b54d..93600da510977 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java @@ -21,6 +21,7 @@ class DefaultSpanScope implements SpanScope { private final Span span; private final SpanScope previousSpanScope; + private final Span beforeSpan; private static final ThreadLocal<SpanScope> spanScopeThreadLocal = new ThreadLocal<>(); private final TracerContextStorage<String, Span> tracerContextStorage; @@ -29,8 +30,14 @@ class DefaultSpanScope implements SpanScope { * @param span span * @param previousSpanScope before attached span scope.
*/ - private DefaultSpanScope(Span span, SpanScope previousSpanScope, TracerContextStorage<String, Span> tracerContextStorage) { + private DefaultSpanScope( + Span span, + final Span beforeSpan, + SpanScope previousSpanScope, + TracerContextStorage<String, Span> tracerContextStorage + ) { this.span = Objects.requireNonNull(span); + this.beforeSpan = beforeSpan; this.previousSpanScope = previousSpanScope; this.tracerContextStorage = tracerContextStorage; } @@ -43,26 +50,27 @@ private DefaultSpanScope(Span span, SpanScope previousSpanScope, TracerContextSt */ public static SpanScope create(Span span, TracerContextStorage<String, Span> tracerContextStorage) { final SpanScope beforeSpanScope = spanScopeThreadLocal.get(); - SpanScope newSpanScope = new DefaultSpanScope(span, beforeSpanScope, tracerContextStorage); - spanScopeThreadLocal.set(newSpanScope); + final Span beforeSpan = tracerContextStorage.get(TracerContextStorage.CURRENT_SPAN); + SpanScope newSpanScope = new DefaultSpanScope(span, beforeSpan, beforeSpanScope, tracerContextStorage); return newSpanScope; } @Override public void close() { detach(); - spanScopeThreadLocal.set(previousSpanScope); } @Override public SpanScope attach() { + spanScopeThreadLocal.set(this); tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, this.span); return this; } private void detach() { - if (previousSpanScope != null) { - tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, previousSpanScope.getSpan()); + spanScopeThreadLocal.set(previousSpanScope); + if (beforeSpan != null) { + tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, beforeSpan); } else { tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, null); } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java index a3bb64ea392a9..8f1a26d99e725 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java @@ -12,7 +12,7 @@ import java.io.Closeable; import java.io.IOException; -import java.util.List; +import java.util.Collection; import java.util.Map; import java.util.Optional; @@ -53,7 +53,6 @@ public Span startSpan(SpanCreationContext context) { parentSpan = getCurrentSpanInternal(); } Span span = createSpan(context, parentSpan); - setCurrentSpanInContext(span); addDefaultAttributes(span); return span; } @@ -94,10 +93,6 @@ private Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan return tracingTelemetry.createSpan(spanCreationContext, parentSpan); } - private void setCurrentSpanInContext(Span span) { - tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, span); - } - /** * Adds default attributes in the span * @param span the current active span @@ -107,7 +102,7 @@ protected void addDefaultAttributes(Span span) { } @Override - public Span startSpan(SpanCreationContext spanCreationContext, Map<String, List<String>> headers) { + public Span startSpan(SpanCreationContext spanCreationContext, Map<String, Collection<String>> headers) { Optional<Span> propagatedSpan = tracingTelemetry.getContextPropagator().extractFromHeaders(headers); return startSpan(spanCreationContext.parent(propagatedSpan.map(SpanContext::new).orElse(null))); }
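With `setCurrentSpanInContext` deleted from `DefaultTracer.startSpan` and thread-local bookkeeping moved into `SpanScope.attach`/`close` above, creating a span no longer implicitly makes it the current span; callers must opt in through a scope. A hedged usage sketch of the new contract follows (`withSpanInScope` appears in the tests later in this diff; `doWork` and the builder-style `SpanCreationContext.internal().name(...)` are assumptions of mine):

```java
// Sketch only: types per the Tracer/SpanScope interfaces touched in this diff.
void traced(Tracer tracer) {
    Span span = tracer.startSpan(SpanCreationContext.internal().name("my-operation")); // not current yet
    try (SpanScope scope = tracer.withSpanInScope(span)) {
        // Inside the scope, tracer.getCurrentSpan() resolves to `span`, so any
        // child span started here picks it up as its parent.
        doWork(); // hypothetical workload
    } finally {
        // Closing the scope restores the previous current span; ending the span
        // remains the caller's explicit responsibility.
        span.endSpan();
    }
}
```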
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java index 8257d251e9560..9b49ca7668992 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java @@ -9,7 +9,7 @@ package org.opensearch.telemetry.tracing; import org.opensearch.common.annotation.ExperimentalApi; -import org.opensearch.telemetry.tracing.http.HttpTracer; +import org.opensearch.telemetry.tracing.transport.TransportTracer; import java.io.Closeable; @@ -22,7 +22,7 @@ * @opensearch.experimental */ @ExperimentalApi -public interface Tracer extends HttpTracer, Closeable { +public interface Tracer extends TransportTracer, Closeable { /** * Starts the {@link Span} with given {@link SpanCreationContext} * diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java index 5fbc5d329e227..d7d48d1db10d6 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java @@ -10,7 +10,7 @@ import org.opensearch.common.annotation.ExperimentalApi; -import java.util.List; +import java.util.Collection; import java.util.Map; import java.util.Optional; import java.util.function.BiConsumer; @@ -36,7 +36,7 @@ public interface TracingContextPropagator { * @param headers request headers to extract the context from * @return current span */ - Optional<Span> extractFromHeaders(Map<String, List<String>> headers); + Optional<Span> extractFromHeaders(Map<String, Collection<String>> headers); /** * Injects tracing context diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java index 50452ff5fe3b4..c57eaccf1f3df 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java @@ -16,7 +16,7 @@ import org.opensearch.telemetry.tracing.SpanScope; import org.opensearch.telemetry.tracing.Tracer; -import java.util.List; +import java.util.Collection; import java.util.Map; /** @@ -65,7 +65,7 @@ public void close() { } @Override - public Span startSpan(SpanCreationContext spanCreationContext, Map<String, List<String>> header) { + public Span startSpan(SpanCreationContext spanCreationContext, Map<String, Collection<String>> header) { return NoopSpan.INSTANCE; } }
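Loosening `extractFromHeaders` and `startSpan` from `Map<String, List<String>>` to `Map<String, Collection<String>>` lets the TCP transport hand its header multimap to the propagator without copying into lists. A minimal caller-side sketch under stated assumptions (the `traceparent` value is just a sample W3C trace-context string, and `SpanCreationContext.server()` is my guess at the builder entry point):

```java
import java.util.Collection;
import java.util.List;
import java.util.Map;

// Any Collection-valued map now satisfies the signature, not only List-valued ones.
Span startFromTransportHeaders(Tracer tracer) {
    Map<String, Collection<String>> headers = Map.of(
        "traceparent", List.of("00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01")
    );
    return tracer.startSpan(SpanCreationContext.server().name("inbound-request"), headers);
}
```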
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/TransportTracer.java similarity index 64% rename from libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java rename to libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/TransportTracer.java index 50d18c0a0d040..5883d7de8e83a 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/TransportTracer.java @@ -6,31 +6,31 @@ * compatible open source license. */ -package org.opensearch.telemetry.tracing.http; +package org.opensearch.telemetry.tracing.transport; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.telemetry.tracing.Span; import org.opensearch.telemetry.tracing.SpanCreationContext; -import java.util.List; +import java.util.Collection; import java.util.Map; /** - * HttpTracer helps in creating a {@link Span} which reads the incoming tracing information - * from the HttpRequest header and propagate the span accordingly. + * TransportTracer helps in creating a {@link Span} which reads the incoming tracing information + * from the HTTP or TCP transport headers and propagates the span accordingly. *

* All methods on the Tracer object are multi-thread safe. * * @opensearch.experimental */ @ExperimentalApi -public interface HttpTracer { +public interface TransportTracer { /** * Start the span with propagating the tracing info from the HttpRequest header. * * @param spanCreationContext span name. - * @param header http request header. - * @return span. + * @param headers transport headers + * @return the span instance */ - Span startSpan(SpanCreationContext spanCreationContext, Map> header); + Span startSpan(SpanCreationContext spanCreationContext, Map> headers); } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/package-info.java new file mode 100644 index 0000000000000..87ffcc43184bb --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains HTTP or TCP transport related tracer capabilities + */ +package org.opensearch.telemetry.tracing.transport; diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java index 2a791f1ae4164..2182b3ea28ac8 100644 --- a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java @@ -22,6 +22,8 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -35,7 +37,6 @@ public class DefaultTracerTests extends OpenSearchTestCase { private Span mockSpan; private Span mockParentSpan; - private SpanScope mockSpanScope; private ThreadPool threadPool; private ExecutorService executorService; private SpanCreationContext spanCreationContext; @@ -102,11 +103,11 @@ public void testCreateSpanWithAttributes() { Span span = defaultTracer.startSpan(spanCreationContext); - assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); - assertEquals(1.0, ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key1")); - assertEquals(2l, ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key2")); - assertEquals(true, ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key3")); - assertEquals("key4", ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key4")); + assertThat(defaultTracer.getCurrentSpan(), is(nullValue())); + assertEquals(1.0, ((MockSpan) span).getAttribute("key1")); + assertEquals(2l, ((MockSpan) span).getAttribute("key2")); + assertEquals(true, ((MockSpan) span).getAttribute("key3")); + assertEquals("key4", ((MockSpan) span).getAttribute("key4")); span.endSpan(); } @@ -121,16 +122,18 @@ public void testCreateSpanWithParent() { Span span = defaultTracer.startSpan(spanCreationContext, null); - SpanContext parentSpan = defaultTracer.getCurrentSpan(); - - SpanCreationContext spanCreationContext1 = buildSpanCreationContext("span_name_1", Attributes.EMPTY, 
parentSpan.getSpan()); + try (final SpanScope scope = defaultTracer.withSpanInScope(span)) { + SpanContext parentSpan = defaultTracer.getCurrentSpan(); - Span span1 = defaultTracer.startSpan(spanCreationContext1); + SpanCreationContext spanCreationContext1 = buildSpanCreationContext("span_name_1", Attributes.EMPTY, parentSpan.getSpan()); - assertEquals("span_name_1", defaultTracer.getCurrentSpan().getSpan().getSpanName()); - assertEquals(parentSpan.getSpan(), defaultTracer.getCurrentSpan().getSpan().getParentSpan()); - span1.endSpan(); - span.endSpan(); + try (final ScopedSpan span1 = defaultTracer.startScopedSpan(spanCreationContext1)) { + assertEquals("span_name_1", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(parentSpan.getSpan(), defaultTracer.getCurrentSpan().getSpan().getParentSpan()); + } + } finally { + span.endSpan(); + } } @SuppressWarnings("unchecked") @@ -155,8 +158,7 @@ public void testCreateSpanWithNullParent() { Span span = defaultTracer.startSpan(spanCreationContext); - assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); - assertEquals(null, defaultTracer.getCurrentSpan().getSpan().getParentSpan()); + assertThat(defaultTracer.getCurrentSpan(), is(nullValue())); span.endSpan(); } @@ -403,7 +405,6 @@ private void setupMocks() { mockTracingTelemetry = mock(TracingTelemetry.class); mockSpan = mock(Span.class); mockParentSpan = mock(Span.class); - mockSpanScope = mock(SpanScope.class); mockTracerContextStorage = mock(TracerContextStorage.class); when(mockSpan.getSpanName()).thenReturn("span_name"); when(mockSpan.getSpanId()).thenReturn("span_id"); diff --git a/libs/x-content/licenses/jackson-core-2.15.2.jar.sha1 b/libs/x-content/licenses/jackson-core-2.15.2.jar.sha1 deleted file mode 100644 index ec6781b968eed..0000000000000 --- a/libs/x-content/licenses/jackson-core-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6fe1836469a69b3ff66037c324d75fc66ef137c \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-core-2.16.0.jar.sha1 b/libs/x-content/licenses/jackson-core-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..c2b70fb4ae202 --- /dev/null +++ b/libs/x-content/licenses/jackson-core-2.16.0.jar.sha1 @@ -0,0 +1 @@ +899e5cf01be55fbf094ad72b2edb0c5df99111ee \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.15.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.15.2.jar.sha1 deleted file mode 100644 index 0022265a84b68..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-cbor-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -baafc85c70765594add14bd93f3efd68e1945b76 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.16.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..8da478fc6013d --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-cbor-2.16.0.jar.sha1 @@ -0,0 +1 @@ +35e8b7bf4fc1d078766bb155103d433ed5bb1627 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.15.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.15.2.jar.sha1 deleted file mode 100644 index 2b8caad846fec..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-smile-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16d1dd22f7d641459ed056399d4f7df0220f1176 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.16.0.jar.sha1 
b/libs/x-content/licenses/jackson-dataformat-smile-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..3e952ffe92418 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-smile-2.16.0.jar.sha1 @@ -0,0 +1 @@ +3c422d7f3901c9a1becf9df3cf41efc68a5ab95c \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.15.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.15.2.jar.sha1 deleted file mode 100644 index 4ad7255e2318f..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-yaml-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58194ff9f51915ad6bf6b6f24818232d7566418a \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.16.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..d62b5874ab023 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-yaml-2.16.0.jar.sha1 @@ -0,0 +1 @@ +2033e2c5f531785d17f3a2bc31842e3bbb7983b2 \ No newline at end of file diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java index 0f8422ea474d2..a2a51d968e078 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java @@ -98,7 +98,7 @@ public Map getProcessors(Processor.Parameters paramet processors.put(ScriptProcessor.TYPE, new ScriptProcessor.Factory(parameters.scriptService)); processors.put(DotExpanderProcessor.TYPE, new DotExpanderProcessor.Factory()); processors.put(JsonProcessor.TYPE, new JsonProcessor.Factory()); - processors.put(KeyValueProcessor.TYPE, new KeyValueProcessor.Factory()); + processors.put(KeyValueProcessor.TYPE, new KeyValueProcessor.Factory(parameters.scriptService)); processors.put(URLDecodeProcessor.TYPE, new URLDecodeProcessor.Factory()); processors.put(BytesProcessor.TYPE, new BytesProcessor.Factory()); processors.put(PipelineProcessor.TYPE, new PipelineProcessor.Factory(parameters.ingestService)); diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/KeyValueProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/KeyValueProcessor.java index ff3cca4ce111f..73f03b3cb2e0f 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/KeyValueProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/KeyValueProcessor.java @@ -33,10 +33,13 @@ package org.opensearch.ingest.common; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.common.Strings; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.Processor; +import org.opensearch.script.ScriptService; +import org.opensearch.script.TemplateScript; import java.util.Collections; import java.util.List; @@ -56,24 +59,24 @@ public final class KeyValueProcessor extends AbstractProcessor { private static final Pattern STRIP_BRACKETS = Pattern.compile("(^[\\(\\[<\"'])|([\\]\\)>\"']$)"); - private final String field; + private final TemplateScript.Factory field; private final String fieldSplit; private final String valueSplit; private final Set includeKeys; private final Set excludeKeys; - private final String targetField; + private final 
TemplateScript.Factory targetField; private final boolean ignoreMissing; private final Consumer execution; KeyValueProcessor( String tag, String description, - String field, + TemplateScript.Factory field, String fieldSplit, String valueSplit, Set includeKeys, Set excludeKeys, - String targetField, + TemplateScript.Factory targetField, boolean ignoreMissing, String trimKey, String trimValue, @@ -106,10 +109,10 @@ public final class KeyValueProcessor extends AbstractProcessor { private static Consumer buildExecution( String fieldSplit, String valueSplit, - String field, + TemplateScript.Factory field, Set includeKeys, Set excludeKeys, - String targetField, + TemplateScript.Factory targetField, boolean ignoreMissing, String trimKey, String trimValue, @@ -130,41 +133,62 @@ private static Consumer buildExecution( keyFilter = key -> includeKeys.contains(key) && excludeKeys.contains(key) == false; } } - final String fieldPathPrefix; - String keyPrefix = prefix == null ? "" : prefix; - if (targetField == null) { - fieldPathPrefix = keyPrefix; - } else { - fieldPathPrefix = targetField + "." + keyPrefix; - } - final Function keyPrefixer; - if (fieldPathPrefix.isEmpty()) { - keyPrefixer = val -> val; - } else { - keyPrefixer = val -> fieldPathPrefix + val; - } - final Function fieldSplitter = buildSplitter(fieldSplit, true); - Function valueSplitter = buildSplitter(valueSplit, false); - final Function keyTrimmer = buildTrimmer(trimKey); - final Function bracketStrip; - if (stripBrackets) { - bracketStrip = val -> STRIP_BRACKETS.matcher(val).replaceAll(""); - } else { - bracketStrip = val -> val; - } - final Function valueTrimmer = buildTrimmer(trimValue); + return document -> { - String value = document.getFieldValue(field, String.class, ignoreMissing); + final String fieldPathPrefix; + String keyPrefix = prefix == null ? "" : prefix; + if (targetField != null) { + String targetFieldPath = document.renderTemplate(targetField); + if (!Strings.isNullOrEmpty((targetFieldPath))) { + fieldPathPrefix = targetFieldPath + "." + keyPrefix; + } else { + fieldPathPrefix = keyPrefix; + } + } else { + fieldPathPrefix = keyPrefix; + } + + final Function keyPrefixer; + if (fieldPathPrefix.isEmpty()) { + keyPrefixer = val -> val; + } else { + keyPrefixer = val -> fieldPathPrefix + val; + } + final Function fieldSplitter = buildSplitter(fieldSplit, true); + Function valueSplitter = buildSplitter(valueSplit, false); + final Function keyTrimmer = buildTrimmer(trimKey); + final Function bracketStrip; + if (stripBrackets) { + bracketStrip = val -> STRIP_BRACKETS.matcher(val).replaceAll(""); + } else { + bracketStrip = val -> val; + } + final Function valueTrimmer = buildTrimmer(trimValue); + + String path = document.renderTemplate(field); + final boolean fieldPathNullOrEmpty = Strings.isNullOrEmpty(path); + if (fieldPathNullOrEmpty || document.hasField(path, true) == false) { + if (ignoreMissing) { + return; + } else if (fieldPathNullOrEmpty) { + throw new IllegalArgumentException("field path cannot be null nor empty"); + } else { + throw new IllegalArgumentException("field [" + path + "] doesn't exist"); + } + } + + String value = document.getFieldValue(path, String.class, ignoreMissing); if (value == null) { if (ignoreMissing) { return; } - throw new IllegalArgumentException("field [" + field + "] is null, cannot extract key-value pairs."); + throw new IllegalArgumentException("field [" + path + "] is null, cannot extract key-value pairs. 
"); } + for (String part : fieldSplitter.apply(value)) { String[] kv = valueSplitter.apply(part); if (kv.length != 2) { - throw new IllegalArgumentException("field [" + field + "] does not contain value_split [" + valueSplit + "]"); + throw new IllegalArgumentException("field [" + path + "] does not contain value_split [" + valueSplit + "]"); } String key = keyTrimmer.apply(kv[0]); if (keyFilter.test(key)) { @@ -193,7 +217,7 @@ private static Function buildSplitter(String split, boolean fi } } - String getField() { + TemplateScript.Factory getField() { return field; } @@ -213,7 +237,7 @@ Set getExcludeKeys() { return excludeKeys; } - String getTargetField() { + TemplateScript.Factory getTargetField() { return targetField; } @@ -241,6 +265,12 @@ public String getType() { } public static class Factory implements Processor.Factory { + private final ScriptService scriptService; + + public Factory(ScriptService scriptService) { + this.scriptService = scriptService; + } + @Override public KeyValueProcessor create( Map registry, @@ -249,7 +279,13 @@ public KeyValueProcessor create( Map config ) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); + TemplateScript.Factory fieldTemplate = ConfigurationUtils.compileTemplate(TYPE, processorTag, "field", field, scriptService); String targetField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "target_field"); + TemplateScript.Factory targetFieldTemplate = null; + if (!Strings.isNullOrEmpty(targetField)) { + targetFieldTemplate = ConfigurationUtils.compileTemplate(TYPE, processorTag, "target_field", targetField, scriptService); + } + String fieldSplit = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field_split"); String valueSplit = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "value_split"); String trimKey = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "trim_key"); @@ -270,12 +306,12 @@ public KeyValueProcessor create( return new KeyValueProcessor( processorTag, description, - field, + fieldTemplate, fieldSplit, valueSplit, includeKeys, excludeKeys, - targetField, + targetFieldTemplate, ignoreMissing, trimKey, trimValue, diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java index 93a35eef4d396..bb3d4bca47859 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java @@ -33,6 +33,7 @@ package org.opensearch.ingest.common; import org.opensearch.core.common.Strings; +import org.opensearch.index.VersionType; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.ingest.IngestDocument; @@ -43,6 +44,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.stream.Collectors; /** @@ -79,6 +81,24 @@ public IngestDocument execute(IngestDocument document) { throw new IllegalArgumentException("field [" + path + "] doesn't exist"); } } + // cannot remove _index, _version and _version_type. 
+ if (path.equals(IngestDocument.Metadata.INDEX.getFieldName()) + || path.equals(IngestDocument.Metadata.VERSION.getFieldName()) + || path.equals(IngestDocument.Metadata.VERSION_TYPE.getFieldName())) { + throw new IllegalArgumentException("cannot remove metadata field [" + path + "]"); + } + // removing _id is disallowed when there's an external version specified in the request + String versionType = document.getFieldValue(IngestDocument.Metadata.VERSION_TYPE.getFieldName(), String.class); + if (path.equals(IngestDocument.Metadata.ID.getFieldName()) + && !Objects.equals(versionType, VersionType.toString(VersionType.INTERNAL))) { + Long version = document.getFieldValue(IngestDocument.Metadata.VERSION.getFieldName(), Long.class); + throw new IllegalArgumentException( + "cannot remove metadata field [_id] when specifying external version for the document, version: " + + version + + ", version_type: " + + versionType + ); + } document.removeField(path); }); return document; diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorFactoryTests.java index 62060a682c0cb..78972ff8d5dea 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorFactoryTests.java @@ -35,7 +35,9 @@ import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchParseException; import org.opensearch.common.util.set.Sets; +import org.opensearch.ingest.TestTemplateService; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.util.Arrays; import java.util.Collections; @@ -48,8 +50,14 @@ public class KeyValueProcessorFactoryTests extends OpenSearchTestCase { + private KeyValueProcessor.Factory factory; + + @Before + public void init() { + factory = new KeyValueProcessor.Factory(TestTemplateService.instance()); + } + public void testCreateWithDefaults() throws Exception { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map config = new HashMap<>(); config.put("field", "field1"); config.put("field_split", "&"); @@ -57,7 +65,7 @@ public void testCreateWithDefaults() throws Exception { String processorTag = randomAlphaOfLength(10); KeyValueProcessor processor = factory.create(null, processorTag, null, config); assertThat(processor.getTag(), equalTo(processorTag)); - assertThat(processor.getField(), equalTo("field1")); + assertThat(processor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("field1")); assertThat(processor.getFieldSplit(), equalTo("&")); assertThat(processor.getValueSplit(), equalTo("=")); assertThat(processor.getIncludeKeys(), is(nullValue())); @@ -66,7 +74,6 @@ public void testCreateWithDefaults() throws Exception { } public void testCreateWithAllFieldsSet() throws Exception { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map config = new HashMap<>(); config.put("field", "field1"); config.put("field_split", "&"); @@ -78,17 +85,16 @@ public void testCreateWithAllFieldsSet() throws Exception { String processorTag = randomAlphaOfLength(10); KeyValueProcessor processor = factory.create(null, processorTag, null, config); assertThat(processor.getTag(), equalTo(processorTag)); - assertThat(processor.getField(), equalTo("field1")); + assertThat(processor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("field1")); 
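To make the two branches of the new `RemoveProcessor` guard above easy to see at a glance, here is the same decision logic restated as a standalone check, with plain strings standing in for the `IngestDocument` metadata accessors. `RemoveGuard`, `checkRemovable`, and its parameters are names I made up for the sketch, and the `"internal"` literal assumes `VersionType.toString(VersionType.INTERNAL)` renders that way:

```java
import java.util.Objects;
import java.util.Set;

final class RemoveGuard {
    private static final Set<String> NEVER_REMOVABLE = Set.of("_index", "_version", "_version_type");

    // Mirrors the guard added to RemoveProcessor#execute above.
    static void checkRemovable(String path, String versionType, Long version) {
        if (NEVER_REMOVABLE.contains(path)) {
            throw new IllegalArgumentException("cannot remove metadata field [" + path + "]");
        }
        // _id is only removable under internal versioning; with an external version
        // the id is part of the optimistic-concurrency contract of the request.
        if ("_id".equals(path) && !Objects.equals(versionType, "internal")) {
            throw new IllegalArgumentException(
                "cannot remove metadata field [_id] when specifying external version for the document, version: "
                    + version + ", version_type: " + versionType
            );
        }
    }
}
```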
assertThat(processor.getFieldSplit(), equalTo("&")); assertThat(processor.getValueSplit(), equalTo("=")); assertThat(processor.getIncludeKeys(), equalTo(Sets.newHashSet("a", "b"))); assertThat(processor.getExcludeKeys(), equalTo(Collections.emptySet())); - assertThat(processor.getTargetField(), equalTo("target")); + assertThat(processor.getTargetField().newInstance(Collections.emptyMap()).execute(), equalTo("target")); assertTrue(processor.isIgnoreMissing()); } public void testCreateWithMissingField() { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map config = new HashMap<>(); String processorTag = randomAlphaOfLength(10); OpenSearchException exception = expectThrows( @@ -99,7 +105,6 @@ public void testCreateWithMissingField() { } public void testCreateWithMissingFieldSplit() { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map config = new HashMap<>(); config.put("field", "field1"); String processorTag = randomAlphaOfLength(10); @@ -111,7 +116,6 @@ public void testCreateWithMissingFieldSplit() { } public void testCreateWithMissingValueSplit() { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map config = new HashMap<>(); config.put("field", "field1"); config.put("field_split", "&"); diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorTests.java index 685a78e2e769b..5f71ea6f16a4f 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorTests.java @@ -36,6 +36,7 @@ import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.Processor; import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.ingest.TestTemplateService; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; @@ -51,7 +52,7 @@ public class KeyValueProcessorTests extends OpenSearchTestCase { - private static final KeyValueProcessor.Factory FACTORY = new KeyValueProcessor.Factory(); + private static final KeyValueProcessor.Factory FACTORY = new KeyValueProcessor.Factory(TestTemplateService.instance()); public void test() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); @@ -123,7 +124,12 @@ public void testMissingField() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); Processor processor = createKvProcessor("unknown", "&", "=", null, null, "target", false); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); - assertThat(exception.getMessage(), equalTo("field [unknown] not present as part of path [unknown]")); + assertThat(exception.getMessage(), equalTo("field [unknown] doesn't exist")); + + // when using template snippet, the resolved field path maybe empty + Processor processorWithEmptyFieldPath = createKvProcessor("", "&", "=", null, null, "target", false); + exception = expectThrows(IllegalArgumentException.class, () -> processorWithEmptyFieldPath.execute(ingestDocument)); + assertThat(exception.getMessage(), equalTo("field path cannot be null nor empty")); } public void testNullValueWithIgnoreMissing() throws Exception { diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java 
b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java index 8f729c6a39bbd..1a5630a4730f2 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java @@ -32,6 +32,7 @@ package org.opensearch.ingest.common; +import org.opensearch.index.VersionType; import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.Processor; import org.opensearch.ingest.RandomDocumentPicks; @@ -40,7 +41,9 @@ import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; @@ -48,7 +51,7 @@ public class RemoveProcessorTests extends OpenSearchTestCase { public void testRemoveFields() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); - String field = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); + String field = RandomDocumentPicks.addRandomField(random(), ingestDocument, randomAlphaOfLength(10)); Processor processor = new RemoveProcessor( randomAlphaOfLength(10), null, @@ -124,4 +127,58 @@ public void testIgnoreMissing() throws Exception { processor = new RemoveProcessor.Factory(TestTemplateService.instance()).create(null, processorTag, null, configWithEmptyField); processor.execute(ingestDocument); } + + public void testRemoveMetadataField() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + List metadataFields = ingestDocument.getMetadata() + .keySet() + .stream() + .map(IngestDocument.Metadata::getFieldName) + .collect(Collectors.toList()); + + for (String metadataFieldName : metadataFields) { + Map config = new HashMap<>(); + config.put("field", metadataFieldName); + String processorTag = randomAlphaOfLength(10); + Processor processor = new RemoveProcessor.Factory(TestTemplateService.instance()).create(null, processorTag, null, config); + // _if_seq_no and _if_primary_term do not exist in the enriched document, removing them will throw IllegalArgumentException + if (metadataFieldName.equals(IngestDocument.Metadata.IF_SEQ_NO.getFieldName()) + || metadataFieldName.equals(IngestDocument.Metadata.IF_PRIMARY_TERM.getFieldName())) { + assertThrows( + "field: [" + metadataFieldName + "] doesn't exist", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + } else if (metadataFieldName.equals(IngestDocument.Metadata.INDEX.getFieldName()) + || metadataFieldName.equals(IngestDocument.Metadata.VERSION.getFieldName()) + || metadataFieldName.equals(IngestDocument.Metadata.VERSION_TYPE.getFieldName())) { + // _index, _version and _version_type cannot be removed + assertThrows( + "cannot remove metadata field [" + metadataFieldName + "]", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + } else if (metadataFieldName.equals(IngestDocument.Metadata.ID.getFieldName())) { + Long version = ingestDocument.getFieldValue(IngestDocument.Metadata.VERSION.getFieldName(), Long.class); + String versionType = ingestDocument.getFieldValue(IngestDocument.Metadata.VERSION_TYPE.getFieldName(), String.class); + if (!versionType.equals(VersionType.toString(VersionType.INTERNAL))) { + assertThrows( + "cannot remove metadata field [_id] when specifying external version for the document, version: " + + version + + ", version_type: " + 
+ versionType, + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + } else { + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(metadataFieldName), equalTo(false)); + } + } else if (metadataFieldName.equals(IngestDocument.Metadata.ROUTING.getFieldName()) + && ingestDocument.hasField(IngestDocument.Metadata.ROUTING.getFieldName())) { + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(metadataFieldName), equalTo(false)); + } + } + } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml index 836243652b2e0..30a0a520b5c40 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml @@ -39,3 +39,151 @@ teardown: id: 1 - match: { _source.goodbye: "everybody" } - match: { _source.hello: "world" } + +--- +"Test KV Processor with template snippets": + - skip: + version: " - 2.11.99" + reason: "KV Processor with template snippets is only supported since 2.12.0" + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "kv" : { + "field" : "{{source}}", + "target_field" : "{{target}}", + "field_split": " ", + "value_split": "=" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "foo", + target: "zoo", + foo: "goodbye=everybody hello=world" + } + + - do: + get: + index: test + id: 1 + - match: { _source.zoo.goodbye: "everybody" } + - match: { _source.zoo.hello: "world" } + +--- +"Test KV Processor with non-existing field and without ignore_missing": + - skip: + version: " - 2.11.99" + reason: "KV Processor with template snippets is only supported since 2.12.0" + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "kv" : { + "field" : "{{source}}", + "target_field" : "{{target}}", + "field_split": " ", + "value_split": "=" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /field path cannot be null nor empty/ + index: + index: test + id: 1 + pipeline: "1" + body: { + target: "zoo", + foo: "goodbye=everybody hello=world" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "kv" : { + "field" : "{{source}}", + "target_field" : "{{target}}", + "field_split": " ", + "value_split": "=" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /field \[unknown\] doesn\'t exist/ + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "unknown", + target: "zoo", + foo: "goodbye=everybody hello=world" + } + +--- +"Test KV Processor with non-existing field and ignore_missing": + - skip: + version: " - 2.11.99" + reason: "KV Processor with template snippets is only supported since 2.12.0" + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "kv" : { + "field" : "{{source}}", + "target_field" : "{{target}}", + "field_split": " ", + "value_split": "=", + "ignore_missing": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + target: "zoo", + foo: "goodbye=everybody hello=world" + } + + - do: + get: + index: test + id: 1 + - match: { _source: { target: "zoo", foo: "goodbye=everybody hello=world"}} diff --git 
a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml index ff5a17136afa2..4811769d04f0e 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml @@ -91,3 +91,139 @@ teardown: index: test id: 1 - match: { _source.message: "foo bar baz" } + +#Related issue: https://github.com/opensearch-project/OpenSearch/issues/10732 +--- +"Test remove metadata field": + - skip: + version: " - 2.11.99" + reason: "The bug was fixed in 2.12" + + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "{{foo}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /cannot remove metadata field \[\_index\]/ + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo: "_index" + } + + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "_version" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /cannot remove metadata field \[\_version\]/ + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo: "bar" + } + + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "_version_type" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /cannot remove metadata field \[\_version\_type\]/ + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo: "bar" + } + + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : ["_id", "_routing"] + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + routing: abc + pipeline: "my_pipeline" + body: { message: "foo bar baz" } + - match: { result: created } + + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "_id" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /cannot remove metadata field \[\_id\] when specifying external version for the document/ + index: + index: test + id: "test_id_10000" + pipeline: "my_pipeline" + version: 1 + version_type: "external" + body: { message: "foo bar baz" } diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.15.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.16.0.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..79ed9e0c63fc8 --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-annotations-2.16.0.jar.sha1 @@ -0,0 +1 @@ +dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.15.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.15.2.jar.sha1 deleted file 
mode 100644 index f16d80af8dce6..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.16.0.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..da00d281934b1 --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-databind-2.16.0.jar.sha1 @@ -0,0 +1 @@ +3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java index bb2f652168d5c..8ca28a905f216 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java @@ -504,10 +504,6 @@ public void testInvalidFieldMember() { } public void testSpecialValueVariable() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10079", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); // i.e. _value for aggregations createIndex("test"); ensureGreen("test"); diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionAggregationScript.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionAggregationScript.java index 5eebb9c4d60ad..a2af636ffdc8a 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionAggregationScript.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionAggregationScript.java @@ -53,9 +53,9 @@ class ExpressionAggregationScript implements AggregationScript.LeafFactory { final SimpleBindings bindings; final DoubleValuesSource source; final boolean needsScore; - final ReplaceableConstDoubleValueSource specialValue; // _value + final PerThreadReplaceableConstDoubleValueSource specialValue; // _value - ExpressionAggregationScript(Expression e, SimpleBindings b, boolean n, ReplaceableConstDoubleValueSource v) { + ExpressionAggregationScript(Expression e, SimpleBindings b, boolean n, PerThreadReplaceableConstDoubleValueSource v) { exprScript = e; bindings = b; source = exprScript.getDoubleValuesSource(bindings); diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java index 11b2e20eea523..5629b3b4a6972 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java @@ -316,14 +316,14 @@ private static AggregationScript.LeafFactory newAggregationScript( // instead of complicating SimpleBindings (which should stay simple) SimpleBindings bindings = new SimpleBindings(); boolean needsScores = false; - ReplaceableConstDoubleValueSource specialValue = null; + PerThreadReplaceableConstDoubleValueSource specialValue = null; for (String variable : expr.variables) { try { if (variable.equals("_score")) { 
bindings.add("_score", DoubleValuesSource.SCORES); needsScores = true; } else if (variable.equals("_value")) { - specialValue = new ReplaceableConstDoubleValueSource(); + specialValue = new PerThreadReplaceableConstDoubleValueSource(); bindings.add("_value", specialValue); // noop: _value is special for aggregations, and is handled in ExpressionScriptBindings // TODO: if some uses it in a scoring expression, they will get a nasty failure when evaluating...need a @@ -388,7 +388,7 @@ private static ScoreScript.LeafFactory newScoreScript(Expression expr, SearchLoo // NOTE: if we need to do anything complicated with bindings in the future, we can just extend Bindings, // instead of complicating SimpleBindings (which should stay simple) SimpleBindings bindings = new SimpleBindings(); - ReplaceableConstDoubleValueSource specialValue = null; + PerThreadReplaceableConstDoubleValueSource specialValue = null; boolean needsScores = false; for (String variable : expr.variables) { try { @@ -396,7 +396,7 @@ private static ScoreScript.LeafFactory newScoreScript(Expression expr, SearchLoo bindings.add("_score", DoubleValuesSource.SCORES); needsScores = true; } else if (variable.equals("_value")) { - specialValue = new ReplaceableConstDoubleValueSource(); + specialValue = new PerThreadReplaceableConstDoubleValueSource(); bindings.add("_value", specialValue); // noop: _value is special for aggregations, and is handled in ExpressionScriptBindings // TODO: if some uses it in a scoring expression, they will get a nasty failure when evaluating...need a diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ReplaceableConstDoubleValueSource.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/PerThreadReplaceableConstDoubleValueSource.java similarity index 62% rename from modules/lang-expression/src/main/java/org/opensearch/script/expression/ReplaceableConstDoubleValueSource.java rename to modules/lang-expression/src/main/java/org/opensearch/script/expression/PerThreadReplaceableConstDoubleValueSource.java index 28e4707a07192..40bb957c248f2 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ReplaceableConstDoubleValueSource.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/PerThreadReplaceableConstDoubleValueSource.java @@ -39,20 +39,25 @@ import org.apache.lucene.search.IndexSearcher; import java.io.IOException; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; /** - * A {@link DoubleValuesSource} which has a stub {@link DoubleValues} that holds a dynamically replaceable constant double. + * A {@link DoubleValuesSource} which has a stub {@link DoubleValues} that holds a dynamically replaceable constant double. This is made + * thread-safe for the concurrent segment search use case by keeping the {@link DoubleValues} per thread. Any update to the value happens in + * the thread-specific {@link DoubleValues} instance. */
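The renamed class below fixes `_value` handling under concurrent segment search: instead of one shared `ReplaceableConstDoubleValues` that collector threads would race on, each thread lazily gets its own holder keyed by thread id. The pattern in miniature, with a `double[1]` standing in for `ReplaceableConstDoubleValues` (class and method names are mine):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Miniature model of the per-thread fix: one mutable holder per thread, created lazily.
// Slices handled by the same thread run sequentially, so a per-thread holder is race-free.
final class PerThreadValue {
    private final Map<Long, double[]> perThread = new ConcurrentHashMap<>();

    private double[] holder() {
        return perThread.computeIfAbsent(Thread.currentThread().getId(), id -> new double[1]);
    }

    void set(double v) {
        holder()[0] = v; // each collector thread writes only its own holder
    }

    double get() {
        return holder()[0]; // reads back the value set on this same thread
    }
}
```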
-final class ReplaceableConstDoubleValueSource extends DoubleValuesSource { - final ReplaceableConstDoubleValues fv; +final class PerThreadReplaceableConstDoubleValueSource extends DoubleValuesSource { + // Multiple slices can be processed by the same thread but that will be sequential, so keeping the values per thread is fine + final Map<Long, ReplaceableConstDoubleValues> perThreadDoubleValues; - ReplaceableConstDoubleValueSource() { - fv = new ReplaceableConstDoubleValues(); + PerThreadReplaceableConstDoubleValueSource() { + perThreadDoubleValues = new ConcurrentHashMap<>(); } @Override public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException { - return fv; + return perThreadDoubleValues.computeIfAbsent(Thread.currentThread().getId(), threadId -> new ReplaceableConstDoubleValues()); } @Override @@ -62,7 +67,11 @@ public boolean needsScores() { @Override public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException { - if (fv.advanceExact(docId)) return Explanation.match((float) fv.doubleValue(), "ReplaceableConstDoubleValues"); + final ReplaceableConstDoubleValues currentFv = perThreadDoubleValues.computeIfAbsent( + Thread.currentThread().getId(), + threadId -> new ReplaceableConstDoubleValues() + ); + if (currentFv.advanceExact(docId)) return Explanation.match((float) currentFv.doubleValue(), "ReplaceableConstDoubleValues"); else return Explanation.noMatch("ReplaceableConstDoubleValues"); } @@ -77,7 +86,11 @@ public int hashCode() { } public void setValue(double v) { - fv.setValue(v); + final ReplaceableConstDoubleValues currentFv = perThreadDoubleValues.computeIfAbsent( + Thread.currentThread().getId(), + threadId -> new ReplaceableConstDoubleValues() + ); + currentFv.setValue(v); } @Override diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index d7af8621c478a..fb51a0bb7f157 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -33,7 +33,6 @@ import com.github.jengelman.gradle.plugins.shadow.ShadowBasePlugin apply plugin: 'opensearch.validate-rest-spec' apply plugin: 'opensearch.yaml-rest-test' -apply plugin: 'com.github.johnrengelman.shadow' opensearchplugin { description 'An easy, safe and fast scripting language for OpenSearch' @@ -62,30 +61,6 @@ dependencies { api project('spi') } -test { - doFirst { - test.classpath -= project.files(project.tasks.named('shadowJar')) - test.classpath -= project.configurations.getByName(ShadowBasePlugin.CONFIGURATION_NAME) - test.classpath += project.extensions.getByType(SourceSetContainer).getByName(SourceSet.MAIN_SOURCE_SET_NAME).runtimeClasspath - } -} - -shadowJar { - archiveClassifier.set('') - relocate 'org.objectweb', 'org.opensearch.repackage.org.objectweb' - dependencies { - include(dependency("org.ow2.asm:asm:${versions.asm}")) - include(dependency("org.ow2.asm:asm-util:${versions.asm}")) - include(dependency("org.ow2.asm:asm-tree:${versions.asm}")) - include(dependency("org.ow2.asm:asm-commons:${versions.asm}")) - include(dependency("org.ow2.asm:asm-analysis:${versions.asm}")) - } -} - -tasks.validateNebulaPom.dependsOn tasks.generatePomFileForShadowPublication -tasks.validateShadowPom.dependsOn tasks.generatePomFileForNebulaPublication -tasks.withType(AbstractPublishToMaven)*.dependsOn "generatePomFileForShadowPublication", "generatePomFileForNebulaPublication" - tasks.named("dependencyLicenses").configure { mapping from: /asm-.*/, to: 'asm' } diff --git a/modules/lang-painless/spi/build.gradle 
b/modules/lang-painless/spi/build.gradle index 59a77870b4987..32556f907fdc0 100644 --- a/modules/lang-painless/spi/build.gradle +++ b/modules/lang-painless/spi/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'opensearch.publish' base { group = 'org.opensearch.plugin' - archivesBaseName = 'opensearch-scripting-painless-spi' + archivesName = 'opensearch-scripting-painless-spi' } dependencies { diff --git a/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java b/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java deleted file mode 100644 index 604c233ca49c4..0000000000000 --- a/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec; - -import org.opensearch.action.admin.indices.flush.FlushResponse; -import org.opensearch.action.admin.indices.refresh.RefreshResponse; -import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; -import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.engine.Segment; -import org.opensearch.index.reindex.BulkByScrollResponse; -import org.opensearch.index.reindex.ReindexAction; -import org.opensearch.index.reindex.ReindexModulePlugin; -import org.opensearch.index.reindex.ReindexRequestBuilder; -import org.opensearch.index.reindex.ReindexTestCase; -import org.opensearch.plugins.Plugin; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static java.util.stream.Collectors.toList; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_METADATA; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_READ; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_WRITE; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; - -public class MultiCodecReindexIT extends ReindexTestCase { - - @Override - protected Collection> nodePlugins() { - return List.of(ReindexModulePlugin.class); - } - - public void testReindexingMultipleCodecs() throws InterruptedException, ExecutionException { - internalCluster().ensureAtLeastNumDataNodes(1); - Map codecMap = Map.of( - "best_compression", - "BEST_COMPRESSION", - "zlib", - "BEST_COMPRESSION", - "default", - "BEST_SPEED", - "lz4", - "BEST_SPEED" - ); - - for (Map.Entry codec : codecMap.entrySet()) { - assertReindexingWithMultipleCodecs(codec.getKey(), codec.getValue(), codecMap); - } - - } - - private void assertReindexingWithMultipleCodecs(String destCodec, String destCodecMode, Map codecMap) - throws ExecutionException, InterruptedException { 
- - final String index = "test-index" + destCodec; - final String destIndex = "dest-index" + destCodec; - - // creating source index - createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", "default") - .put("index.merge.policy.max_merged_segment", "1b") - .build() - ); - ensureGreen(index); - - final int nbDocs = randomIntBetween(2, 5); - - // indexing with all 4 codecs - for (Map.Entry<String, String> codec : codecMap.entrySet()) { - useCodec(index, codec.getKey()); - ingestDocs(index, nbDocs); - } - - assertTrue( - getSegments(index).stream() - .flatMap(s -> s.getAttributes().values().stream()) - .collect(Collectors.toSet()) - .containsAll(codecMap.values()) - ); - - // creating destination index with destination codec - createIndex( - destIndex, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", destCodec) - .build() - ); - - BulkByScrollResponse bulkResponse = new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source(index) - .destination(destIndex) - .refresh(true) - .waitForActiveShards(ActiveShardCount.ONE) - .get(); - - assertEquals(codecMap.size() * nbDocs, bulkResponse.getCreated()); - assertEquals(codecMap.size() * nbDocs, bulkResponse.getTotal()); - assertEquals(0, bulkResponse.getDeleted()); - assertEquals(0, bulkResponse.getNoops()); - assertEquals(0, bulkResponse.getVersionConflicts()); - assertEquals(1, bulkResponse.getBatches()); - assertTrue(bulkResponse.getTook().getMillis() > 0); - assertEquals(0, bulkResponse.getBulkFailures().size()); - assertEquals(0, bulkResponse.getSearchFailures().size()); - assertTrue(getSegments(destIndex).stream().allMatch(segment -> segment.attributes.containsValue(destCodecMode))); - } - - private void useCodec(String index, String codec) throws ExecutionException, InterruptedException { - assertAcked(client().admin().indices().prepareClose(index).setWaitForActiveShards(1)); - - assertAcked( - client().admin() - .indices() - .updateSettings(new UpdateSettingsRequest(index).settings(Settings.builder().put("index.codec", codec))) - .get() - ); - - assertAcked(client().admin().indices().prepareOpen(index).setWaitForActiveShards(1)); - } - - private void flushAndRefreshIndex(String index) { - - // Request is not blocked - for (String blockSetting : Arrays.asList( - SETTING_BLOCKS_READ, - SETTING_BLOCKS_WRITE, - SETTING_READ_ONLY, - SETTING_BLOCKS_METADATA, - SETTING_READ_ONLY_ALLOW_DELETE - )) { - try { - enableIndexBlock(index, blockSetting); - // flush - FlushResponse flushResponse = client().admin().indices().prepareFlush(index).setForce(true).execute().actionGet(); - assertNoFailures(flushResponse); - - // refresh - RefreshResponse refreshResponse = client().admin().indices().prepareRefresh(index).execute().actionGet(); - assertNoFailures(refreshResponse); - } finally { - disableIndexBlock(index, blockSetting); - } - } - } - - private void ingestDocs(String index, int nbDocs) throws InterruptedException { - - indexRandom( - randomBoolean(), - false, - randomBoolean(), - IntStream.range(0, nbDocs) - .mapToObj(i -> client().prepareIndex(index).setId(UUID.randomUUID().toString()).setSource("num", i)) - .collect(toList()) - ); - flushAndRefreshIndex(index); - } - - private ArrayList<Segment> getSegments(String index) { - - return new ArrayList<>( - client().admin() - .indices() - .segments(new IndicesSegmentsRequest(index)) - .actionGet() - .getIndices() -
.get(index) - .getShards() - .get(0) - .getShards()[0].getSegments() - ); - } - -} diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/MultiCodecReindexTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/MultiCodecReindexTests.java new file mode 100644 index 0000000000000..53a0545fd2ff7 --- /dev/null +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/MultiCodecReindexTests.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.reindex; + +import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.action.support.ActiveShardCount; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.MergePolicyProvider; +import org.opensearch.index.engine.Segment; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.InternalSettingsPlugin; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static java.util.stream.Collectors.toList; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +public class MultiCodecReindexTests extends ReindexTestCase { + final static Map<String, String> codecMap = Map.of( + "best_compression", + "BEST_COMPRESSION", + "zlib", + "BEST_COMPRESSION", + "default", + "BEST_SPEED", + "lz4", + "BEST_SPEED" + ); + final static String[] codecChoices = codecMap.keySet().toArray(String[]::new); + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return List.of(InternalSettingsPlugin.class, ReindexModulePlugin.class); + } + + public void testReindexingMultipleCodecs() throws InterruptedException, ExecutionException { + for (Map.Entry<String, String> candidate : codecMap.entrySet()) { + final int nbDocs = randomIntBetween(2, 5); + + final String destCodec = candidate.getKey(); + final String destCodecMode = candidate.getValue(); + + final String index = "test-index-" + destCodec; + final String destIndex = "dest-index-" + destCodec; + + // create source index + createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", randomFrom(codecChoices)) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) + .build() + ); + ensureGreen(index); + + // index using all codecs + for (String codec : codecMap.keySet()) { + useCodec(index, codec); + ingestDocs(index, nbDocs); + } + + assertTrue( + getSegments(index).stream() + .flatMap(s -> s.getAttributes().values().stream()) + .collect(Collectors.toSet()) + .containsAll(codecMap.values()) + ); + + // create destination index with destination codec + createIndex( + destIndex, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", destCodec) + .build() + ); + ensureGreen(destIndex); + + // perform reindex + BulkByScrollResponse response = reindex().source(index) + .destination(destIndex) + .refresh(true) + .waitForActiveShards(ActiveShardCount.ONE) + .get(); + final int
expectedResponseSize = codecMap.size() * nbDocs; + + // assertions + assertEquals(0, response.getNoops()); + assertEquals(1, response.getBatches()); + assertEquals(0, response.getDeleted()); + assertEquals(0, response.getVersionConflicts()); + assertEquals(0, response.getBulkFailures().size()); + assertEquals(0, response.getSearchFailures().size()); + + assertEquals(expectedResponseSize, response.getTotal()); + assertEquals(expectedResponseSize, response.getCreated()); + + assertTrue(response.getTook().getMillis() > 0); + assertTrue(getSegments(destIndex).stream().allMatch(segment -> segment.attributes.containsValue(destCodecMode))); + } + } + + private void useCodec(String index, String codec) throws ExecutionException, InterruptedException { + assertAcked(client().admin().indices().prepareClose(index).setWaitForActiveShards(1)); + + assertAcked( + client().admin() + .indices() + .updateSettings(new UpdateSettingsRequest(index).settings(Settings.builder().put("index.codec", codec))) + .get() + ); + + assertAcked(client().admin().indices().prepareOpen(index).setWaitForActiveShards(1)); + } + + private void ingestDocs(String index, int nbDocs) throws InterruptedException { + indexRandom( + randomBoolean(), + false, + randomBoolean(), + IntStream.range(0, nbDocs) + .mapToObj(i -> client().prepareIndex(index).setId(UUID.randomUUID().toString()).setSource("num", i)) + .collect(toList()) + ); + + flushAndRefresh(index); + } + + private ArrayList<Segment> getSegments(String index) { + return new ArrayList<>( + client().admin() + .indices() + .segments(new IndicesSegmentsRequest(index)) + .actionGet() + .getIndices() + .get(index) + .getShards() + .get(0) + .getShards()[0].getSegments() + ); + } +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/BasicMap.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/BasicMap.java new file mode 100644 index 0000000000000..6ddc22420416b --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/BasicMap.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import java.util.Collection; +import java.util.Map; +import java.util.Set; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.Function; + +/** + * Helper for map abstractions passed to scripting processors. Throws {@link UnsupportedOperationException} for almost + * all methods. Subclasses just need to implement get and put. + */ +abstract class BasicMap implements Map<String, Object> { + + /** + * No-args constructor.
+ */ + protected BasicMap() {} + + private static final String UNSUPPORTED_OP_ERR = " Method not supported in Search pipeline script"; + + @Override + public boolean isEmpty() { + throw new UnsupportedOperationException("isEmpty" + UNSUPPORTED_OP_ERR); + } + + public int size() { + throw new UnsupportedOperationException("size" + UNSUPPORTED_OP_ERR); + } + + public boolean containsKey(Object key) { + return get(key) != null; + } + + public boolean containsValue(Object value) { + throw new UnsupportedOperationException("containsValue" + UNSUPPORTED_OP_ERR); + } + + public Object remove(Object key) { + throw new UnsupportedOperationException("remove" + UNSUPPORTED_OP_ERR); + } + + public void putAll(Map<? extends String, ? extends Object> m) { + throw new UnsupportedOperationException("putAll" + UNSUPPORTED_OP_ERR); + } + + public void clear() { + throw new UnsupportedOperationException("clear" + UNSUPPORTED_OP_ERR); + } + + public Set<String> keySet() { + throw new UnsupportedOperationException("keySet" + UNSUPPORTED_OP_ERR); + } + + public Collection<Object> values() { + throw new UnsupportedOperationException("values" + UNSUPPORTED_OP_ERR); + } + + public Set<Map.Entry<String, Object>> entrySet() { + throw new UnsupportedOperationException("entrySet" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object getOrDefault(Object key, Object defaultValue) { + throw new UnsupportedOperationException("getOrDefault" + UNSUPPORTED_OP_ERR); + } + + @Override + public void forEach(BiConsumer<? super String, ? super Object> action) { + throw new UnsupportedOperationException("forEach" + UNSUPPORTED_OP_ERR); + } + + @Override + public void replaceAll(BiFunction<? super String, ? super Object, ? extends Object> function) { + throw new UnsupportedOperationException("replaceAll" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object putIfAbsent(String key, Object value) { + throw new UnsupportedOperationException("putIfAbsent" + UNSUPPORTED_OP_ERR); + } + + @Override + public boolean remove(Object key, Object value) { + throw new UnsupportedOperationException("remove" + UNSUPPORTED_OP_ERR); + } + + @Override + public boolean replace(String key, Object oldValue, Object newValue) { + throw new UnsupportedOperationException("replace" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object replace(String key, Object value) { + throw new UnsupportedOperationException("replace" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object computeIfAbsent(String key, Function<? super String, ? extends Object> mappingFunction) { + throw new UnsupportedOperationException("computeIfAbsent" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object computeIfPresent(String key, BiFunction<? super String, ? super Object, ? extends Object> remappingFunction) { + throw new UnsupportedOperationException("computeIfPresent" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object compute(String key, BiFunction<? super String, ? super Object, ? extends Object> remappingFunction) { + throw new UnsupportedOperationException("compute" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object merge(String key, Object value, BiFunction<? super Object, ? super Object, ? extends Object> remappingFunction) { + throw new UnsupportedOperationException("merge" + UNSUPPORTED_OP_ERR); + } +}
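Since get and put are the only operations BasicMap leaves abstract, a concrete subclass stays small. A minimal sketch (hypothetical class, not part of this change), assuming a plain HashMap backing store in the same package:

import java.util.HashMap;
import java.util.Map;

class HashBackedMap extends BasicMap {
    // Backing store for the two operations BasicMap leaves abstract.
    private final Map<String, Object> backing = new HashMap<>();

    @Override
    public Object get(Object key) {
        return backing.get(key);
    }

    @Override
    public Object put(String key, Object value) {
        return backing.put(key, value);
    }
}

Every other Map method inherited from BasicMap still throws UnsupportedOperationException, which is the point: maps handed to scripts expose only deliberate read/write paths.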
diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/CollapseResponseProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/CollapseResponseProcessor.java new file mode 100644 index 0000000000000..3e6c4fef6a559 --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/CollapseResponseProcessor.java @@ -0,0 +1,122 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.document.DocumentField; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.pipeline.AbstractProcessor; +import org.opensearch.search.pipeline.Processor; +import org.opensearch.search.pipeline.SearchResponseProcessor; +import org.opensearch.search.pipeline.common.helpers.SearchResponseUtil; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * A simple implementation of field collapsing on search responses. Note that this is not going to work as well as + * field collapsing at the shard level, as implemented with the "collapse" parameter in a search request. Mostly + * just using this to demo the oversample / truncate_hits processors. + */ +public class CollapseResponseProcessor extends AbstractProcessor implements SearchResponseProcessor { + /** + * Key to reference this processor type from a search pipeline. + */ + public static final String TYPE = "collapse"; + static final String COLLAPSE_FIELD = "field"; + private final String collapseField; + + private CollapseResponseProcessor(String tag, String description, boolean ignoreFailure, String collapseField) { + super(tag, description, ignoreFailure); + this.collapseField = Objects.requireNonNull(collapseField); + } + + @Override + public String getType() { + return TYPE; + } + + @Override + public SearchResponse processResponse(SearchRequest request, SearchResponse response) { + + if (response.getHits() != null) { + if (response.getHits().getCollapseField() != null) { + throw new IllegalStateException( + "Cannot collapse on " + collapseField + ". Results already collapsed on " + response.getHits().getCollapseField() + ); + } + Map<String, SearchHit> collapsedHits = new LinkedHashMap<>(); + List<Object> collapseValues = new ArrayList<>(); + for (SearchHit hit : response.getHits()) { + Object fieldValue = null; + DocumentField docField = hit.getFields().get(collapseField); + if (docField != null) { + if (docField.getValues().size() > 1) { + throw new IllegalStateException( + "Failed to collapse " + hit.getId() + ": doc has multiple values for field " + collapseField + ); + } + fieldValue = docField.getValues().get(0); + } else if (hit.getSourceAsMap() != null) { + fieldValue = hit.getSourceAsMap().get(collapseField); + } + String fieldValueString; + if (fieldValue == null) { + fieldValueString = "__missing__"; + } else { + fieldValueString = fieldValue.toString(); + } + + // Results are already sorted by sort criterion. Only keep the first hit for each field.
+ if (collapsedHits.containsKey(fieldValueString) == false) { + collapsedHits.put(fieldValueString, hit); + collapseValues.add(fieldValue); + } + } + SearchHit[] newHits = new SearchHit[collapsedHits.size()]; + int i = 0; + for (SearchHit collapsedHit : collapsedHits.values()) { + newHits[i++] = collapsedHit; + } + SearchHits searchHits = new SearchHits( + newHits, + response.getHits().getTotalHits(), + response.getHits().getMaxScore(), + response.getHits().getSortFields(), + collapseField, + collapseValues.toArray() + ); + return SearchResponseUtil.replaceHits(searchHits, response); + } + return response; + } + + static class Factory implements Processor.Factory<SearchResponseProcessor> { + + @Override + public CollapseResponseProcessor create( + Map<String, Processor.Factory<SearchResponseProcessor>> processorFactories, + String tag, + String description, + boolean ignoreFailure, + Map<String, Object> config, + PipelineContext pipelineContext + ) { + String collapseField = ConfigurationUtils.readStringProperty(TYPE, tag, config, COLLAPSE_FIELD); + return new CollapseResponseProcessor(tag, description, ignoreFailure, collapseField); + } + } + +}
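A rough usage sketch for the collapse processor, mirroring CollapseResponseProcessorTests later in this diff ("category" and the searchRequest/searchResponse variables are illustrative stand-ins):

Map<String, Object> config = new HashMap<>(Map.of(CollapseResponseProcessor.COLLAPSE_FIELD, "category"));
CollapseResponseProcessor processor = new CollapseResponseProcessor.Factory()
    .create(Collections.emptyMap(), null, null, false, config, null);

// Keeps the first hit per distinct "category" value, preserving the existing sort order,
// and records the collapse field and values on the returned SearchHits.
SearchResponse collapsed = processor.processResponse(searchRequest, searchResponse);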
diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/OversampleRequestProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/OversampleRequestProcessor.java new file mode 100644 index 0000000000000..182cf6ba79504 --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/OversampleRequestProcessor.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.search.SearchService; +import org.opensearch.search.pipeline.AbstractProcessor; +import org.opensearch.search.pipeline.PipelineProcessingContext; +import org.opensearch.search.pipeline.Processor; +import org.opensearch.search.pipeline.SearchRequestProcessor; +import org.opensearch.search.pipeline.StatefulSearchRequestProcessor; +import org.opensearch.search.pipeline.common.helpers.ContextUtils; + +import java.util.Map; + +import static org.opensearch.search.pipeline.common.helpers.ContextUtils.applyContextPrefix; + +/** + * Multiplies the "size" parameter on the {@link SearchRequest} by the given scaling factor, storing the original value + * in the request context as "original_size". + */ +public class OversampleRequestProcessor extends AbstractProcessor implements StatefulSearchRequestProcessor { + + /** + * Key to reference this processor type from a search pipeline. + */ + public static final String TYPE = "oversample"; + static final String SAMPLE_FACTOR = "sample_factor"; + static final String ORIGINAL_SIZE = "original_size"; + private final double sampleFactor; + private final String contextPrefix; + + private OversampleRequestProcessor(String tag, String description, boolean ignoreFailure, double sampleFactor, String contextPrefix) { + super(tag, description, ignoreFailure); + this.sampleFactor = sampleFactor; + this.contextPrefix = contextPrefix; + } + + @Override + public SearchRequest processRequest(SearchRequest request, PipelineProcessingContext requestContext) { + if (request.source() != null) { + int originalSize = request.source().size(); + if (originalSize == -1) { + originalSize = SearchService.DEFAULT_SIZE; + } + requestContext.setAttribute(applyContextPrefix(contextPrefix, ORIGINAL_SIZE), originalSize); + int newSize = (int) Math.ceil(originalSize * sampleFactor); + request.source().size(newSize); + } + return request; + } + + @Override + public String getType() { + return TYPE; + } + + static class Factory implements Processor.Factory<SearchRequestProcessor> { + @Override + public OversampleRequestProcessor create( + Map<String, Processor.Factory<SearchRequestProcessor>> processorFactories, + String tag, + String description, + boolean ignoreFailure, + Map<String, Object> config, + PipelineContext pipelineContext + ) { + double sampleFactor = ConfigurationUtils.readDoubleProperty(TYPE, tag, config, SAMPLE_FACTOR); + if (sampleFactor < 1.0) { + throw ConfigurationUtils.newConfigurationException(TYPE, tag, SAMPLE_FACTOR, "Value must be >= 1.0"); + } + String contextPrefix = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, ContextUtils.CONTEXT_PREFIX_PARAMETER); + return new OversampleRequestProcessor(tag, description, ignoreFailure, sampleFactor, contextPrefix); + } + } +}
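A short sketch of the oversample contract, mirroring OversampleRequestProcessorTests at the end of this diff:

Map<String, Object> config = new HashMap<>(Map.of(OversampleRequestProcessor.SAMPLE_FACTOR, 3.0));
OversampleRequestProcessor processor = new OversampleRequestProcessor.Factory()
    .create(Collections.emptyMap(), null, null, false, config, null);

PipelineProcessingContext context = new PipelineProcessingContext();
SearchRequest request = new SearchRequest().source(new SearchSourceBuilder().size(10));
processor.processRequest(request, context);
// request.source().size() is now 30, and context.getAttribute("original_size") returns 10,
// which a downstream truncate_hits processor can use to restore the requested page size.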
diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java index 90f71fd1754e4..a4052d0892ee6 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java @@ -23,9 +23,10 @@ import org.opensearch.script.ScriptType; import org.opensearch.script.SearchScript; import org.opensearch.search.pipeline.AbstractProcessor; +import org.opensearch.search.pipeline.PipelineProcessingContext; import org.opensearch.search.pipeline.Processor; import org.opensearch.search.pipeline.SearchRequestProcessor; -import org.opensearch.search.pipeline.common.helpers.SearchRequestMap; +import org.opensearch.search.pipeline.StatefulSearchRequestProcessor; import java.io.InputStream; import java.util.HashMap; @@ -38,7 +39,7 @@ * Processor that evaluates a script with a search request in its context * and then returns the modified search request. */ -public final class ScriptRequestProcessor extends AbstractProcessor implements SearchRequestProcessor { +public final class ScriptRequestProcessor extends AbstractProcessor implements StatefulSearchRequestProcessor { /** * Key to reference this processor type from a search pipeline. */ @@ -72,15 +73,8 @@ public final class ScriptRequestProcessor extends AbstractProcessor implements S this.scriptService = scriptService; } - /** - * Executes the script with the search request in context. - * - * @param request The search request passed into the script context. - * @return The modified search request. - * @throws Exception if an error occurs while processing the request. - */ @Override - public SearchRequest processRequest(SearchRequest request) throws Exception { + public SearchRequest processRequest(SearchRequest request, PipelineProcessingContext requestContext) throws Exception { // assert request is not null and source is not null if (request == null || request.source() == null) { throw new IllegalArgumentException("search request must not be null"); } @@ -93,10 +87,33 @@ public SearchRequest processRequest(SearchRequest request) throws Exception { searchScript = precompiledSearchScript; } // execute the script with the search request in context - searchScript.execute(Map.of("_source", new SearchRequestMap(request))); + searchScript.execute(Map.of("_source", new SearchRequestMap(request), "request_context", new RequestContextMap(requestContext))); return request; } + private static class RequestContextMap extends BasicMap { + private final PipelineProcessingContext pipelinedRequestContext; + + private RequestContextMap(PipelineProcessingContext pipelinedRequestContext) { + this.pipelinedRequestContext = pipelinedRequestContext; + } + + @Override + public Object get(Object key) { + if (key instanceof String) { + return pipelinedRequestContext.getAttribute(key.toString()); + } + return null; + } + + @Override + public Object put(String key, Object value) { + Object originalValue = get(key); + pipelinedRequestContext.setAttribute(key, value); + return originalValue; + } + } + /** * Returns the type of the processor. * diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java index 49681b80fdead..5378a6721efb2 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java @@ -38,12 +38,21 @@ public Map<String, Processor.Factory<SearchRequestProcessor>> getRequestProcesso FilterQueryRequestProcessor.TYPE, new FilterQueryRequestProcessor.Factory(parameters.namedXContentRegistry), ScriptRequestProcessor.TYPE, - new ScriptRequestProcessor.Factory(parameters.scriptService) + new ScriptRequestProcessor.Factory(parameters.scriptService), + OversampleRequestProcessor.TYPE, + new OversampleRequestProcessor.Factory() ); } @Override public Map<String, Processor.Factory<SearchResponseProcessor>> getResponseProcessors(Parameters parameters) { - return Map.of(RenameFieldResponseProcessor.TYPE, new RenameFieldResponseProcessor.Factory()); + return Map.of( + RenameFieldResponseProcessor.TYPE, + new RenameFieldResponseProcessor.Factory(), + TruncateHitsResponseProcessor.TYPE, + new TruncateHitsResponseProcessor.Factory(), + CollapseResponseProcessor.TYPE, + new CollapseResponseProcessor.Factory() + ); } }
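The package-private SearchRequestMap added below is what the script sees as "_source". A small sketch of its read/write mapping, assuming code in the same package and a request that already has a source:

SearchRequest request = new SearchRequest().source(new SearchSourceBuilder().size(10).explain(true));
SearchRequestMap source = new SearchRequestMap(request);
Object size = source.get("size");   // 10, read from SearchSourceBuilder#size()
source.put("size", 25);             // writes through to the underlying builder
source.put("min_score", 0.5f);      // values must match the builder's types (a Float here)
source.get("sort");                 // unsupported key: throws SearchRequestMapProcessingException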
diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMap.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMap.java new file mode 100644 index 0000000000000..c6430b96dcbed --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMap.java @@ -0,0 +1,140 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.search.builder.SearchSourceBuilder; + +import java.util.Map; + +/** + * A custom implementation of {@link Map} that provides access to the properties of a {@link SearchRequest}'s + * {@link SearchSourceBuilder}. The class allows retrieving and modifying specific properties of the search request. + */ +class SearchRequestMap extends BasicMap implements Map<String, Object> { + + private final SearchSourceBuilder source; + + /** + * Constructs a new instance of the {@link SearchRequestMap} with the provided {@link SearchRequest}. + * + * @param searchRequest The SearchRequest containing the SearchSourceBuilder to be accessed. + */ + public SearchRequestMap(SearchRequest searchRequest) { + source = searchRequest.source(); + } + + /** + * Checks if the SearchSourceBuilder is empty. + * + * @return {@code true} if the SearchSourceBuilder is empty, {@code false} otherwise. + */ + @Override + public boolean isEmpty() { + return source == null; + } + + /** + * Retrieves the value associated with the specified property from the SearchSourceBuilder. + * + * @param key The SearchSourceBuilder property whose value is to be retrieved. + * @return The value associated with the specified property or null if the property has not been initialized. + * @throws IllegalArgumentException if the property name is not a String. + * @throws SearchRequestMapProcessingException if the property is not supported. + */ + @Override + public Object get(Object key) { + if (!(key instanceof String)) { + throw new IllegalArgumentException("key must be a String"); + } + // This is the explicit implementation of fetch value from source + switch ((String) key) { + case "from": + return source.from(); + case "size": + return source.size(); + case "explain": + return source.explain(); + case "version": + return source.version(); + case "seq_no_primary_term": + return source.seqNoAndPrimaryTerm(); + case "track_scores": + return source.trackScores(); + case "track_total_hits": + return source.trackTotalHitsUpTo(); + case "min_score": + return source.minScore(); + case "terminate_after": + return source.terminateAfter(); + case "profile": + return source.profile(); + default: + throw new SearchRequestMapProcessingException("Unsupported key: " + key); + } + } + + /** + * Sets the value for the specified property in the SearchSourceBuilder. + * + * @param key The property whose value is to be set. + * @param value The value to be set for the specified property. + * @return The original value associated with the property, or null if none existed. + * @throws IllegalArgumentException if the property is not a String. + * @throws SearchRequestMapProcessingException if the property is not supported or an error occurs during the setting.
+ */ + @Override + public Object put(String key, Object value) { + Object originalValue = get(key); + try { + switch (key) { + case "from": + source.from((Integer) value); + break; + case "size": + source.size((Integer) value); + break; + case "explain": + source.explain((Boolean) value); + break; + case "version": + source.version((Boolean) value); + break; + case "seq_no_primary_term": + source.seqNoAndPrimaryTerm((Boolean) value); + break; + case "track_scores": + source.trackScores((Boolean) value); + break; + case "track_total_hits": + source.trackTotalHitsUpTo((Integer) value); + break; + case "min_score": + source.minScore((Float) value); + break; + case "terminate_after": + source.terminateAfter((Integer) value); + break; + case "profile": + source.profile((Boolean) value); + break; + case "stats": // Not modifying stats, sorts, docvalue_fields, etc. as they require more complex handling + case "sort": + case "timeout": + case "docvalue_fields": + case "indices_boost": + default: + throw new SearchRequestMapProcessingException("Unsupported SearchRequest source property: " + key); + } + } catch (Exception e) { + throw new SearchRequestMapProcessingException("Error while setting value for SearchRequest source property: " + key, e); + } + return originalValue; + } +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapProcessingException.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMapProcessingException.java similarity index 76% rename from modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapProcessingException.java rename to modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMapProcessingException.java index cb1e45a20b624..2f00d0f82c2f1 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapProcessingException.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMapProcessingException.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.search.pipeline.common.helpers; +package org.opensearch.search.pipeline.common; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchWrapperException; @@ -14,12 +14,12 @@ /** * An exception that indicates an error occurred while processing a {@link SearchRequestMap}. */ -public class SearchRequestMapProcessingException extends OpenSearchException implements OpenSearchWrapperException { +class SearchRequestMapProcessingException extends OpenSearchException implements OpenSearchWrapperException { /** * Constructs a new SearchRequestMapProcessingException with the specified message. * - * @param msg The error message. + * @param msg The error message. * @param args Arguments to substitute in the error message. */ public SearchRequestMapProcessingException(String msg, Object... args) { @@ -29,9 +29,9 @@ public SearchRequestMapProcessingException(String msg, Object... args) { /** * Constructs a new SearchRequestMapProcessingException with the specified message and cause. * - * @param msg The error message. + * @param msg The error message. * @param cause The cause of the exception. - * @param args Arguments to substitute in the error message. + * @param args Arguments to substitute in the error message. 
*/ public SearchRequestMapProcessingException(String msg, Throwable cause, Object... args) { super(msg, cause, args); diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessor.java new file mode 100644 index 0000000000000..e3413bf41720f --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessor.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.search.SearchHit; +import org.opensearch.search.pipeline.AbstractProcessor; +import org.opensearch.search.pipeline.PipelineProcessingContext; +import org.opensearch.search.pipeline.Processor; +import org.opensearch.search.pipeline.SearchResponseProcessor; +import org.opensearch.search.pipeline.StatefulSearchResponseProcessor; +import org.opensearch.search.pipeline.common.helpers.ContextUtils; +import org.opensearch.search.pipeline.common.helpers.SearchResponseUtil; + +import java.util.Map; + +import static org.opensearch.search.pipeline.common.helpers.ContextUtils.applyContextPrefix; + +/** + * Truncates the returned search hits from the {@link SearchResponse}. If no target size is specified in the pipeline, then + * we try using the "original_size" value from the request context, which may have been set by {@link OversampleRequestProcessor}. + */ +public class TruncateHitsResponseProcessor extends AbstractProcessor implements StatefulSearchResponseProcessor { + /** + * Key to reference this processor type from a search pipeline. + */ + public static final String TYPE = "truncate_hits"; + static final String TARGET_SIZE = "target_size"; + private final int targetSize; + private final String contextPrefix; + + @Override + public String getType() { + return TYPE; + } + + private TruncateHitsResponseProcessor(String tag, String description, boolean ignoreFailure, int targetSize, String contextPrefix) { + super(tag, description, ignoreFailure); + this.targetSize = targetSize; + this.contextPrefix = contextPrefix; + } + + @Override + public SearchResponse processResponse(SearchRequest request, SearchResponse response, PipelineProcessingContext requestContext) { + int size; + if (targetSize < 0) { // No value specified in processor config. Use context value instead. 
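+ // The lookup key mirrors what OversampleRequestProcessor stored: with context_prefix "foo", both sides resolve "foo.original_size".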
+ String key = applyContextPrefix(contextPrefix, OversampleRequestProcessor.ORIGINAL_SIZE); + Object o = requestContext.getAttribute(key); + if (o == null) { + throw new IllegalStateException("Must specify " + TARGET_SIZE + " unless an earlier processor set " + key); + } + size = (int) o; + } else { + size = targetSize; + } + if (response.getHits() != null && response.getHits().getHits().length > size) { + SearchHit[] newHits = new SearchHit[size]; + System.arraycopy(response.getHits().getHits(), 0, newHits, 0, size); + return SearchResponseUtil.replaceHits(newHits, response); + } + return response; + } + + static class Factory implements Processor.Factory<SearchResponseProcessor> { + @Override + public TruncateHitsResponseProcessor create( + Map<String, Processor.Factory<SearchResponseProcessor>> processorFactories, + String tag, + String description, + boolean ignoreFailure, + Map<String, Object> config, + PipelineContext pipelineContext + ) { + Integer targetSize = ConfigurationUtils.readIntProperty(TYPE, tag, config, TARGET_SIZE, null); + if (targetSize == null) { + // Use -1 as an "unset" marker to avoid repeated unboxing of an Integer. + targetSize = -1; + } else { + // Explicitly set values must be >= 0. + if (targetSize < 0) { + throw ConfigurationUtils.newConfigurationException(TYPE, tag, TARGET_SIZE, "Value must be >= 0"); + } + } + String contextPrefix = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, ContextUtils.CONTEXT_PREFIX_PARAMETER); + return new TruncateHitsResponseProcessor(tag, description, ignoreFailure, targetSize, contextPrefix); + } + } +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/ContextUtils.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/ContextUtils.java new file mode 100644 index 0000000000000..9697da85dbecf --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/ContextUtils.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common.helpers; + +/** + * Helpers for working with request-scoped context. + */ +public final class ContextUtils { + private ContextUtils() {} + + /** + * Parameter that can be passed to a stateful processor to avoid collisions between contextual variables by + * prefixing them with distinct qualifiers. + */ + public static final String CONTEXT_PREFIX_PARAMETER = "context_prefix"; + + /** + * Replaces a "global" variable name with one scoped to a given context prefix (unless prefix is null or empty). + * @param contextPrefix the prefix qualifier for the variable + * @param variableName the generic "global" form of the context variable + * @return the variableName prefixed with contextPrefix followed by ".", or just variableName if contextPrefix is null or empty + */ + public static String applyContextPrefix(String contextPrefix, String variableName) { + String contextVariable; + if (contextPrefix != null && contextPrefix.isEmpty() == false) { + contextVariable = contextPrefix + "."
+ variableName; + } else { + contextVariable = variableName; + } + return contextVariable; + } +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMap.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMap.java deleted file mode 100644 index 7af3ac66be146..0000000000000 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMap.java +++ /dev/null @@ -1,395 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.search.pipeline.common.helpers; - -import org.opensearch.action.search.SearchRequest; -import org.opensearch.search.builder.SearchSourceBuilder; - -import java.util.Collection; -import java.util.Map; -import java.util.Set; -import java.util.function.BiConsumer; -import java.util.function.BiFunction; -import java.util.function.Function; - -/** - * A custom implementation of {@link Map} that provides access to the properties of a {@link SearchRequest}'s - * {@link SearchSourceBuilder}. The class allows retrieving and modifying specific properties of the search request. - */ -public class SearchRequestMap implements Map { - private static final String UNSUPPORTED_OP_ERR = " Method not supported in Search pipeline script"; - - private final SearchSourceBuilder source; - - /** - * Constructs a new instance of the {@link SearchRequestMap} with the provided {@link SearchRequest}. - * - * @param searchRequest The SearchRequest containing the SearchSourceBuilder to be accessed. - */ - public SearchRequestMap(SearchRequest searchRequest) { - source = searchRequest.source(); - } - - /** - * Retrieves the number of properties in the SearchSourceBuilder. - * - * @return The number of properties in the SearchSourceBuilder. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public int size() { - throw new UnsupportedOperationException("size" + UNSUPPORTED_OP_ERR); - } - - /** - * Checks if the SearchSourceBuilder is empty. - * - * @return {@code true} if the SearchSourceBuilder is empty, {@code false} otherwise. - */ - @Override - public boolean isEmpty() { - return source == null; - } - - /** - * Checks if the SearchSourceBuilder contains the specified property. - * - * @param key The property to check for. - * @return {@code true} if the SearchSourceBuilder contains the specified property, {@code false} otherwise. - */ - @Override - public boolean containsKey(Object key) { - return get(key) != null; - } - - /** - * Checks if the SearchSourceBuilder contains the specified value. - * - * @param value The value to check for. - * @return {@code true} if the SearchSourceBuilder contains the specified value, {@code false} otherwise. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public boolean containsValue(Object value) { - throw new UnsupportedOperationException("containsValue" + UNSUPPORTED_OP_ERR); - } - - /** - * Retrieves the value associated with the specified property from the SearchSourceBuilder. - * - * @param key The SearchSourceBuilder property whose value is to be retrieved. - * @return The value associated with the specified property or null if the property has not been initialized. 
- * @throws IllegalArgumentException if the property name is not a String. - * @throws SearchRequestMapProcessingException if the property is not supported. - */ - @Override - public Object get(Object key) { - if (!(key instanceof String)) { - throw new IllegalArgumentException("key must be a String"); - } - // This is the explicit implementation of fetch value from source - switch ((String) key) { - case "from": - return source.from(); - case "size": - return source.size(); - case "explain": - return source.explain(); - case "version": - return source.version(); - case "seq_no_primary_term": - return source.seqNoAndPrimaryTerm(); - case "track_scores": - return source.trackScores(); - case "track_total_hits": - return source.trackTotalHitsUpTo(); - case "min_score": - return source.minScore(); - case "terminate_after": - return source.terminateAfter(); - case "profile": - return source.profile(); - default: - throw new SearchRequestMapProcessingException("Unsupported key: " + key); - } - } - - /** - * Sets the value for the specified property in the SearchSourceBuilder. - * - * @param key The property whose value is to be set. - * @param value The value to be set for the specified property. - * @return The original value associated with the property, or null if none existed. - * @throws IllegalArgumentException if the property is not a String. - * @throws SearchRequestMapProcessingException if the property is not supported or an error occurs during the setting. - */ - @Override - public Object put(String key, Object value) { - Object originalValue = get(key); - try { - switch (key) { - case "from": - source.from((Integer) value); - break; - case "size": - source.size((Integer) value); - break; - case "explain": - source.explain((Boolean) value); - break; - case "version": - source.version((Boolean) value); - break; - case "seq_no_primary_term": - source.seqNoAndPrimaryTerm((Boolean) value); - break; - case "track_scores": - source.trackScores((Boolean) value); - break; - case "track_total_hits": - source.trackTotalHitsUpTo((Integer) value); - break; - case "min_score": - source.minScore((Float) value); - break; - case "terminate_after": - source.terminateAfter((Integer) value); - break; - case "profile": - source.profile((Boolean) value); - break; - case "stats": // Not modifying stats, sorts, docvalue_fields, etc. as they require more complex handling - case "sort": - case "timeout": - case "docvalue_fields": - case "indices_boost": - default: - throw new SearchRequestMapProcessingException("Unsupported SearchRequest source property: " + key); - } - } catch (Exception e) { - throw new SearchRequestMapProcessingException("Error while setting value for SearchRequest source property: " + key, e); - } - return originalValue; - } - - /** - * Removes the specified property from the SearchSourceBuilder. - * - * @param key The name of the property that will be removed. - * @return The value associated with the property before it was removed, or null if the property was not found. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object remove(Object key) { - throw new UnsupportedOperationException("remove" + UNSUPPORTED_OP_ERR); - } - - /** - * Sets all the properties from the specified map to the SearchSourceBuilder. - * - * @param m The map containing the properties to be set. - * @throws UnsupportedOperationException always, as the method is not supported. 
- */ - @Override - public void putAll(Map m) { - throw new UnsupportedOperationException("putAll" + UNSUPPORTED_OP_ERR); - } - - /** - * Removes all properties from the SearchSourceBuilder. - * - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public void clear() { - throw new UnsupportedOperationException("clear" + UNSUPPORTED_OP_ERR); - } - - /** - * Returns a set view of the property names in the SearchSourceBuilder. - * - * @return A set view of the property names in the SearchSourceBuilder. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Set keySet() { - throw new UnsupportedOperationException("keySet" + UNSUPPORTED_OP_ERR); - } - - /** - * Returns a collection view of the property values in the SearchSourceBuilder. - * - * @return A collection view of the property values in the SearchSourceBuilder. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Collection values() { - throw new UnsupportedOperationException("values" + UNSUPPORTED_OP_ERR); - } - - /** - * Returns a set view of the properties in the SearchSourceBuilder. - * - * @return A set view of the properties in the SearchSourceBuilder. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Set> entrySet() { - throw new UnsupportedOperationException("entrySet" + UNSUPPORTED_OP_ERR); - } - - /** - * Returns the value to which the specified property has, or the defaultValue if the property is not present in the - * SearchSourceBuilder. - * - * @param key The property whose associated value is to be returned. - * @param defaultValue The default value to be returned if the property is not present. - * @return The value to which the specified property has, or the defaultValue if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object getOrDefault(Object key, Object defaultValue) { - throw new UnsupportedOperationException("getOrDefault" + UNSUPPORTED_OP_ERR); - } - - /** - * Performs the given action for each property in the SearchSourceBuilder until all properties have been processed or the - * action throws an exception - * - * @param action The action to be performed for each property. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public void forEach(BiConsumer action) { - throw new UnsupportedOperationException("forEach" + UNSUPPORTED_OP_ERR); - } - - /** - * Replaces each property's value with the result of invoking the given function on that property until all properties have - * been processed or the function throws an exception. - * - * @param function The function to apply to each property. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public void replaceAll(BiFunction function) { - throw new UnsupportedOperationException("replaceAll" + UNSUPPORTED_OP_ERR); - } - - /** - * If the specified property is not already associated with a value, associates it with the given value and returns null, - * else returns the current value. - * - * @param key The property whose value is to be set if absent. - * @param value The value to be associated with the specified property. - * @return The current value associated with the property, or null if the property is not present. 
- * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object putIfAbsent(String key, Object value) { - throw new UnsupportedOperationException("putIfAbsent" + UNSUPPORTED_OP_ERR); - } - - /** - * Removes the property only if it has the given value. - * - * @param key The property to be removed. - * @param value The value expected to be associated with the property. - * @return {@code true} if the entry was removed, {@code false} otherwise. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public boolean remove(Object key, Object value) { - throw new UnsupportedOperationException("remove" + UNSUPPORTED_OP_ERR); - } - - /** - * Replaces the specified property only if it has the given value. - * - * @param key The property to be replaced. - * @param oldValue The value expected to be associated with the property. - * @param newValue The value to be associated with the property. - * @return {@code true} if the property was replaced, {@code false} otherwise. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public boolean replace(String key, Object oldValue, Object newValue) { - throw new UnsupportedOperationException("replace" + UNSUPPORTED_OP_ERR); - } - - /** - * Replaces the specified property only if it has the given value. - * - * @param key The property to be replaced. - * @param value The value to be associated with the property. - * @return The previous value associated with the property, or null if the property was not found. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object replace(String key, Object value) { - throw new UnsupportedOperationException("replace" + UNSUPPORTED_OP_ERR); - } - - /** - * The computed value associated with the property, or null if the property is not present. - * - * @param key The property whose value is to be computed if absent. - * @param mappingFunction The function to compute a value based on the property. - * @return The computed value associated with the property, or null if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object computeIfAbsent(String key, Function mappingFunction) { - throw new UnsupportedOperationException("computeIfAbsent" + UNSUPPORTED_OP_ERR); - } - - /** - * If the value for the specified property is present, attempts to compute a new mapping given the property and its current - * mapped value. - * - * @param key The property for which the mapping is to be computed. - * @param remappingFunction The function to compute a new mapping. - * @return The new value associated with the property, or null if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object computeIfPresent(String key, BiFunction remappingFunction) { - throw new UnsupportedOperationException("computeIfPresent" + UNSUPPORTED_OP_ERR); - } - - /** - * If the value for the specified property is present, attempts to compute a new mapping given the property and its current - * mapped value, or removes the property if the computed value is null. - * - * @param key The property for which the mapping is to be computed. - * @param remappingFunction The function to compute a new mapping. - * @return The new value associated with the property, or null if the property is not present. 
- * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object compute(String key, BiFunction remappingFunction) { - throw new UnsupportedOperationException("compute" + UNSUPPORTED_OP_ERR); - } - - /** - * If the specified property is not already associated with a value or is associated with null, associates it with the - * given non-null value. Otherwise, replaces the associated value with the results of applying the given - * remapping function to the current and new values. - * - * @param key The property for which the mapping is to be merged. - * @param value The non-null value to be merged with the existing value. - * @param remappingFunction The function to merge the existing and new values. - * @return The new value associated with the property, or null if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object merge(String key, Object value, BiFunction remappingFunction) { - throw new UnsupportedOperationException("merge" + UNSUPPORTED_OP_ERR); - } -} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchResponseUtil.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchResponseUtil.java new file mode 100644 index 0000000000000..0710548c6429f --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchResponseUtil.java @@ -0,0 +1,93 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common.helpers; + +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchResponseSections; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.profile.SearchProfileShardResults; + +/** + * Helper methods for manipulating {@link SearchResponse}. + */ +public final class SearchResponseUtil { + private SearchResponseUtil() { + + } + + /** + * Construct a new {@link SearchResponse} based on an existing one, replacing just the {@link SearchHits}. + * @param newHits new {@link SearchHits} + * @param response the existing search response + * @return a new search response where the {@link SearchHits} has been replaced + */ + public static SearchResponse replaceHits(SearchHits newHits, SearchResponse response) { + SearchResponseSections searchResponseSections; + if (response.getAggregations() == null || response.getAggregations() instanceof InternalAggregations) { + // We either have no aggregations, or we have Writeable InternalAggregations. + // Either way, we can produce a Writeable InternalSearchResponse. + searchResponseSections = new InternalSearchResponse( + newHits, + (InternalAggregations) response.getAggregations(), + response.getSuggest(), + new SearchProfileShardResults(response.getProfileResults()), + response.isTimedOut(), + response.isTerminatedEarly(), + response.getNumReducePhases() + ); + } else { + // We have non-Writeable Aggregations, so the whole SearchResponseSections is non-Writeable. 
+ searchResponseSections = new SearchResponseSections( + newHits, + response.getAggregations(), + response.getSuggest(), + response.isTimedOut(), + response.isTerminatedEarly(), + new SearchProfileShardResults(response.getProfileResults()), + response.getNumReducePhases() + ); + } + + return new SearchResponse( + searchResponseSections, + response.getScrollId(), + response.getTotalShards(), + response.getSuccessfulShards(), + response.getSkippedShards(), + response.getTook().millis(), + response.getShardFailures(), + response.getClusters(), + response.pointInTimeId() + ); + } + + /** + * Convenience method when only replacing the {@link SearchHit} array within the {@link SearchHits} in a {@link SearchResponse}. + * @param newHits the new array of {@link SearchHit} elements. + * @param response the search response to update + * @return a {@link SearchResponse} where the underlying array of {@link SearchHit} within the {@link SearchHits} has been replaced. + */ + public static SearchResponse replaceHits(SearchHit[] newHits, SearchResponse response) { + if (response.getHits() == null) { + throw new IllegalStateException("Response must have hits"); + } + SearchHits searchHits = new SearchHits( + newHits, + response.getHits().getTotalHits(), + response.getHits().getMaxScore(), + response.getHits().getSortFields(), + response.getHits().getCollapseField(), + response.getHits().getCollapseValues() + ); + return replaceHits(searchHits, response); + } +} diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/CollapseResponseProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/CollapseResponseProcessorTests.java new file mode 100644 index 0000000000000..cda011f24fea1 --- /dev/null +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/CollapseResponseProcessorTests.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.pipeline.common; + +import org.apache.lucene.search.TotalHits; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.document.DocumentField; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class CollapseResponseProcessorTests extends OpenSearchTestCase { + public void testWithDocumentFields() { + testProcessor(true); + } + + public void testWithSourceField() { + testProcessor(false); + } + + private void testProcessor(boolean includeDocField) { + Map<String, Object> config = new HashMap<>(Map.of(CollapseResponseProcessor.COLLAPSE_FIELD, "groupid")); + CollapseResponseProcessor processor = new CollapseResponseProcessor.Factory().create( + Collections.emptyMap(), + null, + null, + false, + config, + null + ); + int numHits = randomIntBetween(1, 100); + SearchResponse inputResponse = generateResponse(numHits, includeDocField); + + SearchResponse processedResponse = processor.processResponse(new SearchRequest(), inputResponse); + if (numHits % 2 == 0) { + assertEquals(numHits / 2, processedResponse.getHits().getHits().length); + } else { + assertEquals(numHits / 2 + 1, processedResponse.getHits().getHits().length); + } + for (SearchHit collapsedHit : processedResponse.getHits()) { + assertEquals(0, collapsedHit.docId() % 2); + } + assertEquals("groupid", processedResponse.getHits().getCollapseField()); + assertEquals(processedResponse.getHits().getHits().length, processedResponse.getHits().getCollapseValues().length); + for (int i = 0; i < processedResponse.getHits().getHits().length; i++) { + assertEquals(i, processedResponse.getHits().getCollapseValues()[i]); + } + } + + private static SearchResponse generateResponse(int numHits, boolean includeDocField) { + SearchHit[] hitsArray = new SearchHit[numHits]; + for (int i = 0; i < numHits; i++) { + Map<String, DocumentField> docFields; + int groupValue = i / 2; + if (includeDocField) { + docFields = Map.of("groupid", new DocumentField("groupid", List.of(groupValue))); + } else { + docFields = Collections.emptyMap(); + } + SearchHit hit = new SearchHit(i, Integer.toString(i), docFields, Collections.emptyMap()); + hit.sourceRef(new BytesArray("{\"groupid\": " + groupValue + "}")); + hitsArray[i] = hit; + } + SearchHits searchHits = new SearchHits( + hitsArray, + new TotalHits(Math.max(numHits, 1000), TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), + 1.0f + ); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, null, null, false, false, 0); + return new SearchResponse(internalSearchResponse, null, 1, 1, 0, 10, null, null); + } +} diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/OversampleRequestProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/OversampleRequestProcessorTests.java new file mode 100644 index 0000000000000..96e99dff9cc03 --- /dev/null +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/OversampleRequestProcessorTests.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be
licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.pipeline.PipelineProcessingContext; +import org.opensearch.search.pipeline.common.helpers.ContextUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class OversampleRequestProcessorTests extends OpenSearchTestCase { + + public void testEmptySource() { + OversampleRequestProcessor.Factory factory = new OversampleRequestProcessor.Factory(); + Map<String, Object> config = new HashMap<>(Map.of(OversampleRequestProcessor.SAMPLE_FACTOR, 3.0)); + OversampleRequestProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + + SearchRequest request = new SearchRequest(); + PipelineProcessingContext context = new PipelineProcessingContext(); + SearchRequest transformedRequest = processor.processRequest(request, context); + assertEquals(request, transformedRequest); + assertNull(context.getAttribute("original_size")); + } + + public void testBasicBehavior() { + OversampleRequestProcessor.Factory factory = new OversampleRequestProcessor.Factory(); + Map<String, Object> config = new HashMap<>(Map.of(OversampleRequestProcessor.SAMPLE_FACTOR, 3.0)); + OversampleRequestProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().size(10); + SearchRequest request = new SearchRequest().source(sourceBuilder); + PipelineProcessingContext context = new PipelineProcessingContext(); + SearchRequest transformedRequest = processor.processRequest(request, context); + assertEquals(30, transformedRequest.source().size()); + assertEquals(10, context.getAttribute("original_size")); + } + + public void testContextPrefix() { + OversampleRequestProcessor.Factory factory = new OversampleRequestProcessor.Factory(); + Map<String, Object> config = new HashMap<>( + Map.of(OversampleRequestProcessor.SAMPLE_FACTOR, 3.0, ContextUtils.CONTEXT_PREFIX_PARAMETER, "foo") + ); + OversampleRequestProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().size(10); + SearchRequest request = new SearchRequest().source(sourceBuilder); + PipelineProcessingContext context = new PipelineProcessingContext(); + SearchRequest transformedRequest = processor.processRequest(request, context); + assertEquals(30, transformedRequest.source().size()); + assertEquals(10, context.getAttribute("foo.original_size")); + } +} diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/ScriptRequestProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/ScriptRequestProcessorTests.java index fde9757312e30..b372b220b71ac 100644 --- a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/ScriptRequestProcessorTests.java +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/ScriptRequestProcessorTests.java @@ -18,7 +18,7 @@ import org.opensearch.script.ScriptType; import org.opensearch.script.SearchScript; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.search.pipeline.common.helpers.SearchRequestMap; +import
org.opensearch.search.pipeline.PipelineProcessingContext; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -27,8 +27,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static org.hamcrest.core.Is.is; - public class ScriptRequestProcessorTests extends OpenSearchTestCase { private ScriptService scriptService; @@ -87,7 +85,7 @@ public void testScriptingWithoutPrecompiledScriptFactory() throws Exception { searchRequest.source(createSearchSourceBuilder()); assertNotNull(searchRequest); - processor.processRequest(searchRequest); + processor.processRequest(searchRequest, new PipelineProcessingContext()); assertSearchRequest(searchRequest); } @@ -104,7 +102,7 @@ public void testScriptingWithPrecompiledIngestScript() throws Exception { searchRequest.source(createSearchSourceBuilder()); assertNotNull(searchRequest); - processor.processRequest(searchRequest); + processor.processRequest(searchRequest, new PipelineProcessingContext()); assertSearchRequest(searchRequest); } @@ -124,15 +122,15 @@ private SearchSourceBuilder createSearchSourceBuilder() { } private void assertSearchRequest(SearchRequest searchRequest) { - assertThat(searchRequest.source().from(), is(20)); - assertThat(searchRequest.source().size(), is(30)); - assertThat(searchRequest.source().explain(), is(false)); - assertThat(searchRequest.source().version(), is(false)); - assertThat(searchRequest.source().seqNoAndPrimaryTerm(), is(false)); - assertThat(searchRequest.source().trackScores(), is(false)); - assertThat(searchRequest.source().trackTotalHitsUpTo(), is(4)); - assertThat(searchRequest.source().minScore(), is(2.0f)); - assertThat(searchRequest.source().timeout(), is(new TimeValue(60, TimeUnit.SECONDS))); - assertThat(searchRequest.source().terminateAfter(), is(6)); + assertEquals(20, searchRequest.source().from()); + assertEquals(30, searchRequest.source().size()); + assertFalse(searchRequest.source().explain()); + assertFalse(searchRequest.source().version()); + assertFalse(searchRequest.source().seqNoAndPrimaryTerm()); + assertFalse(searchRequest.source().trackScores()); + assertEquals(4, searchRequest.source().trackTotalHitsUpTo().intValue()); + assertEquals(2.0f, searchRequest.source().minScore(), 0.0001); + assertEquals(new TimeValue(60, TimeUnit.SECONDS), searchRequest.source().timeout()); + assertEquals(6, searchRequest.source().terminateAfter()); } } diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchRequestMapTests.java similarity index 99% rename from modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapTests.java rename to modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchRequestMapTests.java index 5572f28335e1c..c982ada7b5ea5 100644 --- a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapTests.java +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchRequestMapTests.java @@ -5,7 +5,7 @@ * this file be licensed under the Apache-2.0 license or a * compatible open source license. 
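These signature changes thread a PipelineProcessingContext through every request processor, which is what lets paired processors hand state to each other across the request/response boundary. A minimal sketch of that pattern, assuming only the setAttribute/getAttribute methods exercised by the tests in this patch; the method bodies are illustrative, not the processors' actual code:

import java.util.Arrays;
import org.opensearch.action.search.SearchRequest;
import org.opensearch.action.search.SearchResponse;
import org.opensearch.search.SearchHit;
import org.opensearch.search.pipeline.PipelineProcessingContext;
import org.opensearch.search.pipeline.common.helpers.SearchResponseUtil;

// Hypothetical pair of processors showing the handoff; not the PR's actual classes.
final class ContextHandoffSketch {
    // Request side: remember the caller's page size before inflating it (oversample pattern).
    static SearchRequest rememberAndOversample(SearchRequest request, PipelineProcessingContext ctx, double sampleFactor) {
        int originalSize = request.source().size();
        ctx.setAttribute("original_size", originalSize);            // store state for later
        request.source().size((int) Math.ceil(originalSize * sampleFactor));
        return request;
    }

    // Response side: read the remembered size back and trim the hits to it (truncate_hits pattern).
    static SearchResponse truncateToOriginal(SearchResponse response, PipelineProcessingContext ctx) {
        int originalSize = (int) ctx.getAttribute("original_size"); // read state back
        SearchHit[] hits = response.getHits().getHits();
        return SearchResponseUtil.replaceHits(Arrays.copyOf(hits, Math.min(originalSize, hits.length)), response);
    }
}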
*/ -package org.opensearch.search.pipeline.common.helpers; +package org.opensearch.search.pipeline.common; import org.opensearch.action.search.SearchRequest; import org.opensearch.search.builder.SearchSourceBuilder; diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessorTests.java new file mode 100644 index 0000000000000..7615225c7f77e --- /dev/null +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessorTests.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.apache.lucene.search.TotalHits; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.pipeline.PipelineProcessingContext; +import org.opensearch.search.pipeline.common.helpers.ContextUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class TruncateHitsResponseProcessorTests extends OpenSearchTestCase { + + public void testBasicBehavior() { + int targetSize = randomInt(50); + TruncateHitsResponseProcessor.Factory factory = new TruncateHitsResponseProcessor.Factory(); + Map<String, Object> config = new HashMap<>(Map.of(TruncateHitsResponseProcessor.TARGET_SIZE, targetSize)); + TruncateHitsResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + + int numHits = randomInt(100); + SearchResponse response = constructResponse(numHits); + SearchResponse transformedResponse = processor.processResponse(new SearchRequest(), response, new PipelineProcessingContext()); + assertEquals(Math.min(targetSize, numHits), transformedResponse.getHits().getHits().length); + } + + public void testTargetSizePassedViaContext() { + TruncateHitsResponseProcessor.Factory factory = new TruncateHitsResponseProcessor.Factory(); + TruncateHitsResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, false, Collections.emptyMap(), null); + + int targetSize = randomInt(50); + int numHits = randomInt(100); + SearchResponse response = constructResponse(numHits); + PipelineProcessingContext requestContext = new PipelineProcessingContext(); + requestContext.setAttribute("original_size", targetSize); + SearchResponse transformedResponse = processor.processResponse(new SearchRequest(), response, requestContext); + assertEquals(Math.min(targetSize, numHits), transformedResponse.getHits().getHits().length); + } + + public void testTargetSizePassedViaContextWithPrefix() { + TruncateHitsResponseProcessor.Factory factory = new TruncateHitsResponseProcessor.Factory(); + Map<String, Object> config = new HashMap<>(Map.of(ContextUtils.CONTEXT_PREFIX_PARAMETER, "foo")); + TruncateHitsResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + + int targetSize = randomInt(50); + int numHits = randomInt(100); + SearchResponse response = constructResponse(numHits); + PipelineProcessingContext
requestContext = new PipelineProcessingContext(); + requestContext.setAttribute("foo.original_size", targetSize); + SearchResponse transformedResponse = processor.processResponse(new SearchRequest(), response, requestContext); + assertEquals(Math.min(targetSize, numHits), transformedResponse.getHits().getHits().length); + } + + public void testTargetSizeMissing() { + TruncateHitsResponseProcessor.Factory factory = new TruncateHitsResponseProcessor.Factory(); + TruncateHitsResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, false, Collections.emptyMap(), null); + + int numHits = randomInt(100); + SearchResponse response = constructResponse(numHits); + assertThrows( + IllegalStateException.class, + () -> processor.processResponse(new SearchRequest(), response, new PipelineProcessingContext()) + ); + } + + private static SearchResponse constructResponse(int numHits) { + SearchHit[] hitsArray = new SearchHit[numHits]; + for (int i = 0; i < numHits; i++) { + hitsArray[i] = new SearchHit(i, Integer.toString(i), Collections.emptyMap(), Collections.emptyMap()); + } + SearchHits searchHits = new SearchHits( + hitsArray, + new TotalHits(Math.max(numHits, 1000), TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), + 1.0f + ); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, null, null, false, false, 0); + return new SearchResponse(internalSearchResponse, null, 1, 1, 0, 10, null, null); + } +} diff --git a/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/60_oversample_truncate.yml b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/60_oversample_truncate.yml new file mode 100644 index 0000000000000..1f9e95084322d --- /dev/null +++ b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/60_oversample_truncate.yml @@ -0,0 +1,105 @@ +--- +teardown: + - do: + search_pipeline.delete: + id: "my_pipeline" + ignore: 404 + +--- +"Test state propagating from oversample to truncate_hits processor": + - do: + search_pipeline.put: + id: "my_pipeline" + body: > + { + "description": "_description", + "request_processors": [ + { + "oversample" : { + "sample_factor" : 2 + } + } + ], + "response_processors": [ + { + "collapse" : { + "field" : "group_id" + } + }, + { + "truncate_hits" : {} + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + body: { + "group_id": "a", + "popularity" : 1 + } + - do: + index: + index: test + id: 2 + body: { + "group_id": "a", + "popularity" : 2 + } + - do: + index: + index: test + id: 3 + body: { + "group_id": "b", + "popularity" : 3 + } + - do: + index: + index: test + id: 4 + body: { + "group_id": "b", + "popularity" : 4 + } + - do: + indices.refresh: + index: test + + - do: + search: + body: { + "query" : { + "function_score" : { + "field_value_factor" : { + "field" : "popularity" + } + } + }, + "size" : 2 + } + - match: { hits.total.value: 4 } + - length: { hits.hits: 2 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.1._id: "3" } + + - do: + search: + search_pipeline: my_pipeline + body: { + "query" : { + "function_score" : { + "field_value_factor" : { + "field" : "popularity" + } + } + }, + "size" : 2 + } + - match: { hits.total.value: 4 } + - length: { hits.hits: 2 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.1._id: "2" } diff --git 
a/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/70_script_truncate.yml b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/70_script_truncate.yml new file mode 100644 index 0000000000000..9c9f6747e9bdc --- /dev/null +++ b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/70_script_truncate.yml @@ -0,0 +1,70 @@ +--- +teardown: + - do: + search_pipeline.delete: + id: "my_pipeline" + ignore: 404 + +--- +"Test state propagating from script request to truncate_hits processor": + - do: + search_pipeline.put: + id: "my_pipeline" + body: > + { + "description": "_description", + "request_processors": [ + { + "script" : { + "source" : "ctx.request_context['foo.original_size'] = 2" + } + } + ], + "response_processors": [ + { + "truncate_hits" : { + "context_prefix" : "foo" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + body: {} + - do: + index: + index: test + id: 2 + body: {} + - do: + index: + index: test + id: 3 + body: {} + - do: + index: + index: test + id: 4 + body: {} + - do: + indices.refresh: + index: test + + - do: + search: + body: { + } + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + + - do: + search: + search_pipeline: my_pipeline + body: { + } + - match: { hits.total.value: 4 } + - length: { hits.hits: 2 } diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 deleted file mode 100644 index aaf2e35302d77..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..7bb27bbfcb87d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +a4d94fd6cdf7a37b15237e32434afd6b955cc591 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 deleted file mode 100644 index a77333ea8ae47..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..c792b4e834030 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +642523c57c4be15b8b461be7b1532262d193bbb3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 deleted file mode 100644 index 6f26bf4e6a9b5..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 new file mode 100644 
index 0000000000000..8bf31f6c3a2c7 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +c648f863b301e3fc62d0de098ec61be7628b8ea2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 deleted file mode 100644 index bf5605151406e..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cbf1a430ea44dbdedbcde16b185cbb95f28d72c7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..48b42bfab9287 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +40590da6c615b852384542051330e9fd51d4c4b1 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 deleted file mode 100644 index d2ff72db60d1f..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -847f942381145de23f21c836d05b0677474271d3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..f4fe86799eaff --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +bdb16e4d308b5757123decc886896815d1daf7a3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 deleted file mode 100644 index f12a6046e96d0..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c0acdb8bb73647ebb3847ac2d503d53d72c02b4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..cfc0befee237a --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +7800aea949bf95f63df73166d144e49405b279dd \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 deleted file mode 100644 index 8e4179ba15942..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fe62f9ccd41b8660d07639dbbab8ae1edd6f2720 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..f4425d5ac8e5f --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +5e7eddfa4dfffcbd375d265d1fd08297df94f82f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 deleted file mode 100644 index 
ab2819da570fd..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6620fbfb47667a5eb6050e35c7b4c88000bcd77f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..fd9199092452b --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +0b36cc2337b4a4135df7ee2f2d77431c3b541dc5 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 deleted file mode 100644 index 30d7758302e37..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -648ff5571022dbfa6789122e3872477bbf67fa7b \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..291c87ad7987b --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +d0697ef1d71c6111a1b18a387fa985fd6398ace2 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.15.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.16.0.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..79ed9e0c63fc8 --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-annotations-2.16.0.jar.sha1 @@ -0,0 +1 @@ +dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.15.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.16.0.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..da00d281934b1 --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-databind-2.16.0.jar.sha1 @@ -0,0 +1 @@ +3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsMasterKeyProvider.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsMasterKeyProvider.java index 9003e8bebd5ff..19e000628a9c8 100644 --- a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsMasterKeyProvider.java +++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsMasterKeyProvider.java @@ -59,7 +59,10 @@ public DataKeyPair generateDataPair() { @Override public byte[] decryptKey(byte[] encryptedKey) { try (AmazonKmsClientReference clientReference = 
clientReferenceSupplier.get()) { - DecryptRequest decryptRequest = DecryptRequest.builder().ciphertextBlob(SdkBytes.fromByteArray(encryptedKey)).build(); + DecryptRequest decryptRequest = DecryptRequest.builder() + .ciphertextBlob(SdkBytes.fromByteArray(encryptedKey)) + .encryptionContext(encryptionContext) + .build(); DecryptResponse decryptResponse = SocketAccess.doPrivileged(() -> clientReference.get().decrypt(decryptRequest)); return decryptResponse.plaintext().asByteArray(); } diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 1fed446016647..c3d70e9c64968 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -53,7 +53,7 @@ dependencies { api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-lang:commons-lang:2.6" - api "commons-io:commons-io:2.13.0" + api "commons-io:commons-io:2.15.1" api 'javax.mail:mail:1.4.7' api 'javax.inject:javax.inject:1' api "com.sun.jersey:jersey-client:${versions.jersey}" diff --git a/plugins/discovery-azure-classic/licenses/commons-io-2.13.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-io-2.13.0.jar.sha1 deleted file mode 100644 index c165136eb5822..0000000000000 --- a/plugins/discovery-azure-classic/licenses/commons-io-2.13.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8bb2bc9b4df17e2411533a0708a69f983bf5e83b \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-io-2.15.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-io-2.15.1.jar.sha1 new file mode 100644 index 0000000000000..47c5d13812a36 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/commons-io-2.15.1.jar.sha1 @@ -0,0 +1 @@ +f11560da189ab563a5c8e351941415430e9304ea \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.15.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.16.0.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..79ed9e0c63fc8 --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-annotations-2.16.0.jar.sha1 @@ -0,0 +1 @@ +dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.15.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.16.0.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..da00d281934b1 --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-databind-2.16.0.jar.sha1 @@ -0,0 +1 @@ +3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 8945c09fca28b..57a2493053956 100644 --- a/plugins/ingest-attachment/build.gradle 
+++ b/plugins/ingest-attachment/build.gradle @@ -57,7 +57,7 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" // Other dependencies api 'org.tukaani:xz:1.9' - api 'commons-io:commons-io:2.14.0' + api 'commons-io:commons-io:2.15.0' api "org.slf4j:slf4j-api:${versions.slf4j}" // character set detection @@ -79,7 +79,7 @@ dependencies { api "org.apache.poi:poi:${versions.poi}" api "org.apache.poi:poi-ooxml-lite:${versions.poi}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 'org.apache.xmlbeans:xmlbeans:5.1.1' + api 'org.apache.xmlbeans:xmlbeans:5.2.0' api 'org.apache.commons:commons-collections4:4.4' // MS Office api "org.apache.poi:poi-scratchpad:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 deleted file mode 100644 index 33c5cfe53e01d..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a4c6e1f6c196339473cd2e1b037f0eb97c62755b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-io-2.15.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.15.0.jar.sha1 new file mode 100644 index 0000000000000..73709383fd130 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-io-2.15.0.jar.sha1 @@ -0,0 +1 @@ +5c3c2db10f6f797430a7f9c696b4d1273768c924 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 deleted file mode 100644 index 4d1d2ad0807e7..0000000000000 --- a/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -48a369df0eccb509d46203104e4df9cb00f0f68b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.2.0.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.2.0.jar.sha1 new file mode 100644 index 0000000000000..f34274d593697 --- /dev/null +++ b/plugins/ingest-attachment/licenses/xmlbeans-5.2.0.jar.sha1 @@ -0,0 +1 @@ +6198ac997b3f234f2b5393fa415f78fac2e06510 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.15.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/plugins/repository-azure/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.16.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..79ed9e0c63fc8 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-annotations-2.16.0.jar.sha1 @@ -0,0 +1 @@ +dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.15.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/plugins/repository-azure/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.16.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..da00d281934b1 --- /dev/null +++ 
b/plugins/repository-azure/licenses/jackson-databind-2.16.0.jar.sha1 @@ -0,0 +1 @@ +3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.15.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.15.2.jar.sha1 deleted file mode 100644 index 18c388b84f333..0000000000000 --- a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e7e9038dee5c1adb1ebd07d3669e0e1182ac5b60 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..f0d165ff7cf82 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.0.jar.sha1 @@ -0,0 +1 @@ +f3cdb002e0f2f30ad9c5fd053d78b1a485511ab1 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.15.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.15.2.jar.sha1 deleted file mode 100644 index 6aa4f9b99c274..0000000000000 --- a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30d16ec2aef6d8094c5e2dce1d95034ca8b6cb42 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..40379694f5ea5 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.0.jar.sha1 @@ -0,0 +1 @@ +77e3a27823f795d928b897d8444744ddb044a5c3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.15.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.15.2.jar.sha1 deleted file mode 100644 index 80da08928f855..0000000000000 --- a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6a22fd1c7b0f9788e81eea32c11dc8c1ba421f18 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..820d14b3df8e4 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.0.jar.sha1 @@ -0,0 +1 @@ +684daae9ea45087c670b4f6511edcfdb19c3a695 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 deleted file mode 100644 index dfa4a0fbea94c..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -663b1b7bf3ff0f12fde4df20c72d9e94584ebffa \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..0789dea62778a --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +24d0f4f2b0b0b55acc498d62f9c0921d0ea6da7b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 deleted 
file mode 100644 index bf5605151406e..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cbf1a430ea44dbdedbcde16b185cbb95f28d72c7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..48b42bfab9287 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +40590da6c615b852384542051330e9fd51d4c4b1 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1 deleted file mode 100644 index 8e9bc8c96aec7..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a9fbf4d64b08abed542eefd5f7aed4807edca56f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..fb2d1aa25a5ec --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +cbaac9e2c2d7b86e4c1bda220cde49949c9bcaee \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 deleted file mode 100644 index 35d9d82202274..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af3cf676eed30184215426ecf0f0dde15555ea9c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..ca030677c32d0 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +c198ee61abba7ae6b3cb6b1033f04fb0ae79d512 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 deleted file mode 100644 index 0948daa05fff6..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -62dbdf5f25eda75ea8456be1ed72b3fcb0d18774 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..450c34d975bb9 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +d3f503d7554cf25edc6b83b9b4724b240c6e91d5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 deleted file mode 100644 index 30d7758302e37..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -648ff5571022dbfa6789122e3872477bbf67fa7b \ No newline at end of file diff --git 
a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..291c87ad7987b --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +d0697ef1d71c6111a1b18a387fa985fd6398ace2 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 deleted file mode 100644 index 352d69396d0c9..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -378dc5a375e6440099e837b22cf4b01341cbe4ea \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.13.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.13.jar.sha1 new file mode 100644 index 0000000000000..5eaf96739ed72 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.13.jar.sha1 @@ -0,0 +1 @@ +faea23e582978a34f6a932b81e86206ec2314990 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 deleted file mode 100644 index 1bcb0e0c52950..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e839fadb8f45d8a7a2783466faedd03373366c23 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.13.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.13.jar.sha1 new file mode 100644 index 0000000000000..091125169c696 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.13.jar.sha1 @@ -0,0 +1 @@ +c5af7bc746050d080891a5446cca2c96a0c51d03 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 deleted file mode 100644 index a9aa34392903e..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ef15a3ce29a792b7ad17438e5f84c617b3f2993 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.101.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..093ced04bee4b --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +2761d380bc97e6ce64745dd9ca04538cedd40a5b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.15.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/plugins/repository-s3/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.16.0.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..79ed9e0c63fc8 --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-annotations-2.16.0.jar.sha1 @@ -0,0 +1 @@ +dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/jackson-databind-2.15.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.16.0.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..da00d281934b1 --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-databind-2.16.0.jar.sha1 @@ -0,0 +1 @@ +3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 deleted file mode 100644 index aaf2e35302d77..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..7bb27bbfcb87d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +a4d94fd6cdf7a37b15237e32434afd6b955cc591 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 deleted file mode 100644 index a77333ea8ae47..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..c792b4e834030 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +642523c57c4be15b8b461be7b1532262d193bbb3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1 deleted file mode 100644 index 6f26bf4e6a9b5..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..8bf31f6c3a2c7 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +c648f863b301e3fc62d0de098ec61be7628b8ea2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 deleted file mode 100644 index bf5605151406e..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cbf1a430ea44dbdedbcde16b185cbb95f28d72c7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 
b/plugins/repository-s3/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..48b42bfab9287 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +40590da6c615b852384542051330e9fd51d4c4b1 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 deleted file mode 100644 index d2ff72db60d1f..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -847f942381145de23f21c836d05b0677474271d3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..f4fe86799eaff --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +bdb16e4d308b5757123decc886896815d1daf7a3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 deleted file mode 100644 index f12a6046e96d0..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c0acdb8bb73647ebb3847ac2d503d53d72c02b4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..cfc0befee237a --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +7800aea949bf95f63df73166d144e49405b279dd \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1 deleted file mode 100644 index 8e4179ba15942..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fe62f9ccd41b8660d07639dbbab8ae1edd6f2720 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..f4425d5ac8e5f --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +5e7eddfa4dfffcbd375d265d1fd08297df94f82f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1 deleted file mode 100644 index ab2819da570fd..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6620fbfb47667a5eb6050e35c7b4c88000bcd77f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..fd9199092452b --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +0b36cc2337b4a4135df7ee2f2d77431c3b541dc5 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1 
deleted file mode 100644 index 5805fdaf411d1..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -78489936ca1d91483e34a31d04a3b0812386eb39 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..a710d964c1803 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +f86303830ab65680e45cabb531e37078ecc6791e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 deleted file mode 100644 index 30d7758302e37..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -648ff5571022dbfa6789122e3872477bbf67fa7b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..291c87ad7987b --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +d0697ef1d71c6111a1b18a387fa985fd6398ace2 \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java index 262304029a0d3..d691cad9c9d03 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java @@ -374,7 +374,7 @@ private static IrsaCredentials buildFromEnvironment(IrsaCredentials defaults) { return new IrsaCredentials(webIdentityTokenFile, roleArn, roleSessionName); } - private synchronized void releaseCachedClients() { + public synchronized void releaseCachedClients() { // the clients will shutdown when they will not be used anymore for (final AmazonAsyncS3Reference clientReference : clientsCache.values()) { clientReference.decRef(); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index c1180aab0e0c7..3a55fcb0bdbcd 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -91,6 +91,7 @@ import org.opensearch.repositories.s3.async.UploadRequest; import org.opensearch.repositories.s3.utils.HttpRangeUtils; +import java.io.BufferedInputStream; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; @@ -188,10 +189,38 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener comp writeContext.getWritePriority(), writeContext.getUploadFinalizer(), writeContext.doRemoteDataIntegrityCheck(), - writeContext.getExpectedChecksum() + writeContext.getExpectedChecksum(), + blobStore.isUploadRetryEnabled() ); try { - long partSize = blobStore.getAsyncTransferManager().calculateOptimalPartSize(writeContext.getFileSize()); + 
if (uploadRequest.getContentLength() > ByteSizeUnit.GB.toBytes(10) && blobStore.isRedirectLargeUploads()) { + StreamContext streamContext = SocketAccess.doPrivileged( + () -> writeContext.getStreamProvider(uploadRequest.getContentLength()) + ); + InputStreamContainer inputStream = streamContext.provideStream(0); + try { + executeMultipartUpload( + blobStore, + uploadRequest.getKey(), + inputStream.getInputStream(), + uploadRequest.getContentLength() + ); + completionListener.onResponse(null); + } catch (Exception ex) { + logger.error( + () -> new ParameterizedMessage( + "Failed to upload large file {} of size {} ", + uploadRequest.getKey(), + uploadRequest.getContentLength() + ), + ex + ); + completionListener.onFailure(ex); + } + return; + } + long partSize = blobStore.getAsyncTransferManager() + .calculateOptimalPartSize(writeContext.getFileSize(), writeContext.getWritePriority(), blobStore.isUploadRetryEnabled()); StreamContext streamContext = SocketAccess.doPrivileged(() -> writeContext.getStreamProvider(partSize)); try (AmazonAsyncS3Reference amazonS3Reference = SocketAccess.doPrivileged(blobStore::asyncClientReference)) { @@ -537,8 +566,14 @@ void executeSingleUpload(final S3BlobStore blobStore, final String blobName, fin PutObjectRequest putObjectRequest = putObjectRequestBuilder.build(); try (AmazonS3Reference clientReference = blobStore.clientReference()) { + final InputStream requestInputStream; + if (blobStore.isUploadRetryEnabled()) { + requestInputStream = new BufferedInputStream(input, (int) (blobSize + 1)); + } else { + requestInputStream = input; + } SocketAccess.doPrivilegedVoid( - () -> clientReference.get().putObject(putObjectRequest, RequestBody.fromInputStream(input, blobSize)) + () -> clientReference.get().putObject(putObjectRequest, RequestBody.fromInputStream(requestInputStream, blobSize)) ); } catch (final SdkException e) { throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e); @@ -578,6 +613,13 @@ void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, createMultipartUploadRequestBuilder.serverSideEncryption(ServerSideEncryption.AES256); } + final InputStream requestInputStream; + if (blobStore.isUploadRetryEnabled()) { + requestInputStream = new BufferedInputStream(input, (int) (partSize + 1)); + } else { + requestInputStream = input; + } + CreateMultipartUploadRequest createMultipartUploadRequest = createMultipartUploadRequestBuilder.build(); try (AmazonS3Reference clientReference = blobStore.clientReference()) { uploadId.set( @@ -601,10 +643,9 @@ void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, .build(); bytesCount += uploadPartRequest.contentLength(); - final UploadPartResponse uploadResponse = SocketAccess.doPrivileged( () -> clientReference.get() - .uploadPart(uploadPartRequest, RequestBody.fromInputStream(input, uploadPartRequest.contentLength())) + .uploadPart(uploadPartRequest, RequestBody.fromInputStream(requestInputStream, uploadPartRequest.contentLength())) ); parts.add(CompletedPart.builder().partNumber(uploadPartRequest.partNumber()).eTag(uploadResponse.eTag()).build()); } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java index e8e043357e126..fc70fbb0db00e 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java +++ 
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java index e8e043357e126..fc70fbb0db00e 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java @@ -56,8 +56,10 @@ import static org.opensearch.repositories.s3.S3Repository.BUFFER_SIZE_SETTING; import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE; import static org.opensearch.repositories.s3.S3Repository.CANNED_ACL_SETTING; +import static org.opensearch.repositories.s3.S3Repository.REDIRECT_LARGE_S3_UPLOAD; import static org.opensearch.repositories.s3.S3Repository.SERVER_SIDE_ENCRYPTION_SETTING; import static org.opensearch.repositories.s3.S3Repository.STORAGE_CLASS_SETTING; +import static org.opensearch.repositories.s3.S3Repository.UPLOAD_RETRY_ENABLED; class S3BlobStore implements BlobStore { @@ -71,6 +73,10 @@ class S3BlobStore implements BlobStore { private volatile ByteSizeValue bufferSize; + private volatile boolean redirectLargeUploads; + + private volatile boolean uploadRetryEnabled; + private volatile boolean serverSideEncryption; private volatile ObjectCannedACL cannedACL; @@ -119,6 +125,9 @@ class S3BlobStore implements BlobStore { this.normalExecutorBuilder = normalExecutorBuilder; this.priorityExecutorBuilder = priorityExecutorBuilder; this.urgentExecutorBuilder = urgentExecutorBuilder; + // Settings to initialize blobstore with. + this.redirectLargeUploads = REDIRECT_LARGE_S3_UPLOAD.get(repositoryMetadata.settings()); + this.uploadRetryEnabled = UPLOAD_RETRY_ENABLED.get(repositoryMetadata.settings()); } @Override @@ -130,6 +139,8 @@ public void reload(RepositoryMetadata repositoryMetadata) { this.cannedACL = initCannedACL(CANNED_ACL_SETTING.get(repositoryMetadata.settings())); this.storageClass = initStorageClass(STORAGE_CLASS_SETTING.get(repositoryMetadata.settings())); this.bulkDeletesSize = BULK_DELETE_SIZE.get(repositoryMetadata.settings()); + this.redirectLargeUploads = REDIRECT_LARGE_S3_UPLOAD.get(repositoryMetadata.settings()); + this.uploadRetryEnabled = UPLOAD_RETRY_ENABLED.get(repositoryMetadata.settings()); } @Override @@ -149,6 +160,14 @@ int getMaxRetries() { return service.settings(repositoryMetadata).maxRetries; } + public boolean isRedirectLargeUploads() { + return redirectLargeUploads; + } + + public boolean isUploadRetryEnabled() { + return uploadRetryEnabled; + } + public String bucket() { return bucket; } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java index 2392c66329e06..4fda0ee95a3ec 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java @@ -177,7 +177,7 @@ final class S3ClientSettings { static final Setting.AffixSetting REQUEST_TIMEOUT_SETTING = Setting.affixKeySetting( PREFIX, "request_timeout", - key -> Setting.timeSetting(key, TimeValue.timeValueMinutes(2), Property.NodeScope) + key -> Setting.timeSetting(key, TimeValue.timeValueMinutes(5), Property.NodeScope) ); /** The connection timeout for connecting to s3. */ @@ -198,14 +198,14 @@ final class S3ClientSettings { static final Setting.AffixSetting MAX_CONNECTIONS_SETTING = Setting.affixKeySetting( PREFIX, "max_connections", - key -> Setting.intSetting(key, 100, Property.NodeScope) + key -> Setting.intSetting(key, 500, Property.NodeScope) );
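The client defaults above change: request_timeout moves from 2 to 5 minutes and max_connections from 100 to 500 (connection_acquisition_timeout goes from 2 to 15 minutes in the next hunk). All of these remain overridable per client through the s3.client.<name>.* affix keys; a minimal sketch of pinning the new values explicitly, assuming the standard "default" client name:

```java
import org.opensearch.common.settings.Settings;

public class S3ClientDefaults {
    public static void main(String[] args) {
        // Sketch: spelling out the new defaults for the "default" S3 client.
        // Keys follow the s3.client.<name>.* affix pattern of S3ClientSettings.
        Settings settings = Settings.builder()
            .put("s3.client.default.request_timeout", "5m")                  // was 2m
            .put("s3.client.default.max_connections", 500)                   // was 100
            .put("s3.client.default.connection_acquisition_timeout", "15m")  // was 2m
            .build();
        System.out.println(settings.keySet());
    }
}
```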
*/ static final Setting.AffixSetting CONNECTION_ACQUISITION_TIMEOUT = Setting.affixKeySetting( PREFIX, "connection_acquisition_timeout", - key -> Setting.timeSetting(key, TimeValue.timeValueMinutes(2), Property.NodeScope) + key -> Setting.timeSetting(key, TimeValue.timeValueMinutes(15), Property.NodeScope) ); /** The maximum pending connections to S3. */ diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index 728a99b1220a6..f7772a57c9afd 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -147,6 +147,20 @@ class S3Repository extends MeteredBlobStoreRepository { */ static final ByteSizeValue MAX_FILE_SIZE_USING_MULTIPART = new ByteSizeValue(5, ByteSizeUnit.TB); + /** + * Whether large uploads should be redirected to the slower synchronous S3 client. + */ + static final Setting<Boolean> REDIRECT_LARGE_S3_UPLOAD = Setting.boolSetting( + "redirect_large_s3_upload", + true, + Setting.Property.NodeScope + ); + + /** + * Whether retries on uploads are enabled. This setting wraps the input stream in a buffered stream to enable retries. + */ + static final Setting<Boolean> UPLOAD_RETRY_ENABLED = Setting.boolSetting("s3_upload_retry_enabled", true, Setting.Property.NodeScope); + /** * Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold, * the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of buffer_size length, and @@ -391,7 +405,9 @@ public void reload(RepositoryMetadata newRepositoryMetadata) { // Reload configs for S3RepositoryPlugin service.settings(metadata); + service.releaseCachedClients(); s3AsyncService.settings(metadata); + s3AsyncService.releaseCachedClients(); // Reload configs for S3BlobStore BlobStore blobStore = getBlobStore(); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java index 9ed232464d080..e7d2a4d024e60 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java @@ -99,23 +99,32 @@ public S3RepositoryPlugin(final Settings settings, final Path configPath) { @Override public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) { List<ExecutorBuilder<?>> executorBuilders = new ArrayList<>(); - int halfProcMaxAt5 = halfAllocatedProcessorsMaxFive(allocatedProcessors(settings)); + int halfProc = halfNumberOfProcessors(allocatedProcessors(settings)); executorBuilders.add( new FixedExecutorBuilder(settings, URGENT_FUTURE_COMPLETION, urgentPoolCount(settings), 10_000, URGENT_FUTURE_COMPLETION) ); - executorBuilders.add(new ScalingExecutorBuilder(URGENT_STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); + executorBuilders.add(new ScalingExecutorBuilder(URGENT_STREAM_READER, 1, halfProc, TimeValue.timeValueMinutes(5))); executorBuilders.add( - new FixedExecutorBuilder(settings, PRIORITY_FUTURE_COMPLETION, priorityPoolCount(settings), 10_000, PRIORITY_FUTURE_COMPLETION) + new ScalingExecutorBuilder(PRIORITY_FUTURE_COMPLETION, 1, allocatedProcessors(settings), TimeValue.timeValueMinutes(5)) ); - executorBuilders.add(new ScalingExecutorBuilder(PRIORITY_STREAM_READER, 1, halfProcMaxAt5,
TimeValue.timeValueMinutes(5))); + executorBuilders.add(new ScalingExecutorBuilder(PRIORITY_STREAM_READER, 1, halfProc, TimeValue.timeValueMinutes(5))); - executorBuilders.add(new FixedExecutorBuilder(settings, FUTURE_COMPLETION, normalPoolCount(settings), 10_000, FUTURE_COMPLETION)); - executorBuilders.add(new ScalingExecutorBuilder(STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); + executorBuilders.add( + new ScalingExecutorBuilder(FUTURE_COMPLETION, 1, allocatedProcessors(settings), TimeValue.timeValueMinutes(5)) + ); + executorBuilders.add( + new ScalingExecutorBuilder( + STREAM_READER, + allocatedProcessors(settings), + 4 * allocatedProcessors(settings), + TimeValue.timeValueMinutes(5) + ) + ); return executorBuilders; } - static int halfAllocatedProcessorsMaxFive(final int allocatedProcessors) { - return boundedBy((allocatedProcessors + 1) / 2, 1, 5); + static int halfNumberOfProcessors(int numberOfProcessors) { + return (numberOfProcessors + 1) / 2; } S3RepositoryPlugin(final Settings settings, final Path configPath, final S3Service service, final S3AsyncService s3AsyncService) { @@ -252,7 +261,9 @@ public List<Setting<?>> getSettings() { S3ClientSettings.IDENTITY_TOKEN_FILE_SETTING, S3ClientSettings.ROLE_SESSION_NAME_SETTING, S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING, - S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING + S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING, + S3Repository.REDIRECT_LARGE_S3_UPLOAD, + S3Repository.UPLOAD_RETRY_ENABLED ); } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java index b1b3e19eac275..24387fb98a425 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java @@ -438,7 +438,7 @@ private static IrsaCredentials buildFromEnviroment(IrsaCredentials defaults) { return new IrsaCredentials(webIdentityTokenFile, roleArn, roleSessionName); } - private synchronized void releaseCachedClients() { + public synchronized void releaseCachedClients() { // the clients will shutdown when they will not be used anymore for (final AmazonS3Reference clientReference : clientsCache.values()) { clientReference.decRef();
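getExecutorBuilders above drops the old five-thread ceiling (halfAllocatedProcessorsMaxFive) and sizes the pools off the raw processor count, with the stream-reader pool now scaling between the processor count and four times it. A small sketch of the resulting bounds, assuming a 16-processor node purely for illustration:

```java
public class S3PoolSizing {
    // Same arithmetic as the new halfNumberOfProcessors: half the
    // processors, rounded up, with no upper cap.
    static int halfNumberOfProcessors(int numberOfProcessors) {
        return (numberOfProcessors + 1) / 2;
    }

    public static void main(String[] args) {
        int procs = 16; // assumed node size for the example
        System.out.println("urgent/priority stream readers: 1.." + halfNumberOfProcessors(procs)); // 1..8
        System.out.println("future completion: 1.." + procs);                                      // 1..16
        System.out.println("stream readers: " + procs + ".." + (4 * procs));                       // 16..64
    }
}
```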
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java index 0c63bfdb1ff97..8d2772d42ebca 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java @@ -12,6 +12,8 @@ import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.metrics.MetricRecord; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.opensearch.common.blobstore.BlobStore; import java.time.Duration; @@ -21,6 +23,7 @@ public class StatsMetricPublisher { + private static final Logger LOGGER = LogManager.getLogger(StatsMetricPublisher.class); private final Stats stats = new Stats(); private final Map extendedStats = new HashMap<>() { @@ -35,6 +38,7 @@ public class StatsMetricPublisher { public MetricPublisher listObjectsMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { + LOGGER.debug(() -> "List objects request metrics: " + metricCollection); for (MetricRecord metricRecord : metricCollection) { switch (metricRecord.metric().name()) { case "ApiCallDuration": @@ -64,6 +68,7 @@ public void close() {} public MetricPublisher deleteObjectsMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { + LOGGER.debug(() -> "Delete objects request metrics: " + metricCollection); for (MetricRecord metricRecord : metricCollection) { switch (metricRecord.metric().name()) { case "ApiCallDuration": @@ -93,6 +98,7 @@ public void close() {} public MetricPublisher getObjectMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { + LOGGER.debug(() -> "Get object request metrics: " + metricCollection); for (MetricRecord metricRecord : metricCollection) { switch (metricRecord.metric().name()) { case "ApiCallDuration": @@ -122,6 +128,7 @@ public void close() {} public MetricPublisher putObjectMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { + LOGGER.debug(() -> "Put object request metrics: " + metricCollection); for (MetricRecord metricRecord : metricCollection) { switch (metricRecord.metric().name()) { case "ApiCallDuration": @@ -151,6 +158,7 @@ public void close() {} public MetricPublisher multipartUploadMetricCollector = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { + LOGGER.debug(() -> "Multi-part request metrics: " + metricCollection); for (MetricRecord metricRecord : metricCollection) { switch (metricRecord.metric().name()) { case "ApiCallDuration": diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java index 933ee6dc29513..b4c4ed0ecaa75 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java @@ -23,8 +23,8 @@ import org.opensearch.common.StreamContext; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.io.InputStreamContainer; -import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.s3.SocketAccess; +import org.opensearch.repositories.s3.StatsMetricPublisher; import org.opensearch.repositories.s3.io.CheckedContainer; import java.io.BufferedInputStream; @@ -54,6 +54,7 @@ public class AsyncPartsHandler { * @param uploadId Upload Id against which multi-part is being performed * @param completedParts Reference of completed parts * @param inputStreamContainers Checksum containers + * @param statsMetricPublisher sdk metric publisher * @return list of completable futures * @throws IOException thrown in case of an IO error */ @@ -66,7 +67,9 @@ public static List<CompletableFuture<CompletedPart>> uploadParts( StreamContext streamContext, String uploadId, AtomicReferenceArray<CompletedPart> completedParts, - AtomicReferenceArray<CheckedContainer> inputStreamContainers + AtomicReferenceArray<CheckedContainer> inputStreamContainers, + StatsMetricPublisher statsMetricPublisher, + boolean uploadRetryEnabled ) throws IOException { List<CompletableFuture<CompletedPart>> futures = new ArrayList<>(); for (int partIdx = 0; partIdx < streamContext.getNumberOfParts(); partIdx++) { @@ -77,6 +80,7 @@ public static List<CompletableFuture<CompletedPart>> uploadParts( .partNumber(partIdx + 1) .key(uploadRequest.getKey()) .uploadId(uploadId) + .overrideConfiguration(o ->
o.addMetricPublisher(statsMetricPublisher.multipartUploadMetricCollector)) .contentLength(inputStreamContainer.getContentLength()); if (uploadRequest.doRemoteDataIntegrityCheck()) { uploadPartRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32); @@ -91,7 +95,8 @@ public static List<CompletableFuture<CompletedPart>> uploadParts( futures, uploadPartRequestBuilder.build(), inputStreamContainer, - uploadRequest + uploadRequest, + uploadRetryEnabled ); } @@ -128,6 +133,18 @@ public static void cleanUpParts(S3AsyncClient s3AsyncClient, UploadRequest uploa })); } + public static InputStream maybeRetryInputStream( + InputStream inputStream, + WritePriority writePriority, + boolean uploadRetryEnabled, + long contentLength + ) { + if (uploadRetryEnabled && (writePriority == WritePriority.HIGH || writePriority == WritePriority.URGENT)) { + return new BufferedInputStream(inputStream, (int) (contentLength + 1)); + } + return inputStream; + } + private static void uploadPart( S3AsyncClient s3AsyncClient, ExecutorService executorService, @@ -138,7 +155,8 @@ private static void uploadPart( List<CompletableFuture<CompletedPart>> futures, UploadPartRequest uploadPartRequest, InputStreamContainer inputStreamContainer, - UploadRequest uploadRequest + UploadRequest uploadRequest, + boolean uploadRetryEnabled ) { Integer partNumber = uploadPartRequest.partNumber(); @@ -150,9 +168,13 @@ private static void uploadPart( } else { streamReadExecutor = executorService; } - // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered - // data can be retried instead of retrying whole file by the application. - InputStream inputStream = new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)); + + InputStream inputStream = maybeRetryInputStream( + inputStreamContainer.getInputStream(), + uploadRequest.getWritePriority(), + uploadRetryEnabled, + uploadPartRequest.contentLength() + ); CompletableFuture<UploadPartResponse> uploadPartResponseFuture = SocketAccess.doPrivileged( () -> s3AsyncClient.uploadPart( uploadPartRequest, diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java index 4f1ab9764702e..2259780c95276 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java @@ -34,11 +34,11 @@ import org.opensearch.common.io.InputStreamContainer; import org.opensearch.common.util.ByteUtils; import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.repositories.s3.SocketAccess; import org.opensearch.repositories.s3.StatsMetricPublisher; import org.opensearch.repositories.s3.io.CheckedContainer; -import java.io.BufferedInputStream; import java.io.IOException; import java.io.InputStream; import java.util.Arrays; @@ -146,7 +146,14 @@ private void uploadInParts( handleException(returnFuture, () -> "Failed to initiate multipart upload", throwable); } else { log.debug(() -> "Initiated new multipart upload, uploadId: " + createMultipartUploadResponse.uploadId()); - doUploadInParts(s3AsyncClient, uploadRequest, streamContext, returnFuture, createMultipartUploadResponse.uploadId()); + doUploadInParts( + s3AsyncClient, + uploadRequest, + streamContext, + returnFuture, + createMultipartUploadResponse.uploadId(), + statsMetricPublisher + ); } }); }
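maybeRetryInputStream above buffers only HIGH and URGENT writes, sizing the BufferedInputStream one byte past the content length so the SDK can mark() the start of a part and reset() back to it after a transient failure, replaying the buffered bytes instead of re-reading the whole file. A self-contained sketch of that mark/reset contract using plain JDK streams (no AWS types; the payload is invented):

```java
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class MarkResetDemo {
    public static void main(String[] args) throws IOException {
        byte[] part = "part-payload".getBytes();
        // Buffer sized contentLength + 1, as in maybeRetryInputStream, so the
        // whole part stays replayable from memory.
        InputStream in = new BufferedInputStream(new ByteArrayInputStream(part), part.length + 1);
        in.mark(part.length + 1);           // remember the start of the part
        in.readNBytes(5);                   // simulate a partial send that then fails
        in.reset();                         // rewind: a retry re-reads from byte 0
        System.out.println(new String(in.readAllBytes())); // prints "part-payload"
    }
}
```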
@@ -156,7 +163,8 @@ private void doUploadInParts( UploadRequest uploadRequest, StreamContext streamContext, CompletableFuture<Void> returnFuture, - String uploadId + String uploadId, + StatsMetricPublisher statsMetricPublisher ) { // The list of completed parts must be sorted @@ -174,7 +182,9 @@ private void doUploadInParts( streamContext, uploadId, completedParts, - inputStreamContainers + inputStreamContainers, + statsMetricPublisher, + uploadRequest.isUploadRetryEnabled() ); } catch (Exception ex) { try { @@ -198,7 +208,7 @@ private void doUploadInParts( } return null; }) - .thenCompose(ignore -> completeMultipartUpload(s3AsyncClient, uploadRequest, uploadId, completedParts)) + .thenCompose(ignore -> completeMultipartUpload(s3AsyncClient, uploadRequest, uploadId, completedParts, statsMetricPublisher)) .handle(handleExceptionOrResponse(s3AsyncClient, uploadRequest, returnFuture, uploadId)) .exceptionally(throwable -> { handleException(returnFuture, () -> "Unexpected exception occurred", throwable); @@ -245,7 +255,8 @@ private CompletableFuture completeMultipartUplo S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, String uploadId, - AtomicReferenceArray<CompletedPart> completedParts + AtomicReferenceArray<CompletedPart> completedParts, + StatsMetricPublisher statsMetricPublisher ) { log.debug(() -> new ParameterizedMessage("Sending completeMultipartUploadRequest, uploadId: {}", uploadId)); @@ -254,6 +265,7 @@ private CompletableFuture completeMultipartUplo .bucket(uploadRequest.getBucket()) .key(uploadRequest.getKey()) .uploadId(uploadId) + .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.multipartUploadMetricCollector)) .multipartUpload(CompletedMultipartUpload.builder().parts(parts).build()) .build(); @@ -291,10 +303,13 @@ private static void handleException(CompletableFuture returnFuture, Suppli /** * Calculates the optimal part size of each part request if the upload operation is carried out as multipart upload. */ - public long calculateOptimalPartSize(long contentLengthOfSource) { + public long calculateOptimalPartSize(long contentLengthOfSource, WritePriority writePriority, boolean uploadRetryEnabled) { if (contentLengthOfSource < ByteSizeUnit.MB.toBytes(5)) { return contentLengthOfSource; } + if (uploadRetryEnabled && (writePriority == WritePriority.HIGH || writePriority == WritePriority.URGENT)) { + return new ByteSizeValue(5, ByteSizeUnit.MB).getBytes(); + } double optimalPartSize = contentLengthOfSource / (double) MAX_UPLOAD_PARTS; optimalPartSize = Math.ceil(optimalPartSize); return (long) Math.max(optimalPartSize, minimumPartSize);
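The reworked calculateOptimalPartSize above has three regimes: sources under 5 MB go up in a single chunk, retry-enabled HIGH/URGENT writes pin parts at 5 MB so the per-part retry buffer stays small, and everything else spreads the source across the maximum part count. A worked sketch of that fallback arithmetic, assuming S3's usual 10,000-part cap and a 5 MB minimum part size as defaults:

```java
import org.opensearch.core.common.unit.ByteSizeUnit;

public class PartSizeMath {
    // Assumed constants: S3 caps a multipart upload at 10,000 parts, and the
    // plugin's default minimum part size is 5 MB.
    static final int MAX_UPLOAD_PARTS = 10_000;
    static final long MIN_PART_SIZE = ByteSizeUnit.MB.toBytes(5);

    static long optimalPartSize(long contentLength) {
        double optimal = Math.ceil(contentLength / (double) MAX_UPLOAD_PARTS);
        return (long) Math.max(optimal, MIN_PART_SIZE);
    }

    public static void main(String[] args) {
        // 1 TB source: 1 TB / 10,000 parts -> roughly 105 MiB per part.
        System.out.println(optimalPartSize(ByteSizeUnit.TB.toBytes(1)));
        // 10 GB source: roughly 1 MiB per part, clamped up to the 5 MB floor.
        System.out.println(optimalPartSize(ByteSizeUnit.GB.toBytes(10)));
    }
}
```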
@@ -324,9 +339,13 @@ private void uploadInOneChunk( } else { streamReadExecutor = executorService; } - // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered - // data can be retried instead of retrying whole file by the application. - InputStream inputStream = new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1)); + + InputStream inputStream = AsyncPartsHandler.maybeRetryInputStream( + inputStreamContainer.getInputStream(), + uploadRequest.getWritePriority(), + uploadRequest.isUploadRetryEnabled(), + uploadRequest.getContentLength() + ); CompletableFuture putObjectFuture = SocketAccess.doPrivileged( () -> s3AsyncClient.putObject( putObjectRequestBuilder.build(), diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/UploadRequest.java index 3804c8417eb9f..a5304dc4a97d6 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/UploadRequest.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/UploadRequest.java @@ -25,6 +25,8 @@ public class UploadRequest { private final boolean doRemoteDataIntegrityCheck; private final Long expectedChecksum; + private boolean uploadRetryEnabled; + /** * Construct a new UploadRequest object * @@ -43,7 +45,8 @@ public UploadRequest( WritePriority writePriority, CheckedConsumer<Boolean, IOException> uploadFinalizer, boolean doRemoteDataIntegrityCheck, - Long expectedChecksum + Long expectedChecksum, + boolean uploadRetryEnabled ) { this.bucket = bucket; this.key = key; @@ -52,6 +55,7 @@ public UploadRequest( this.uploadFinalizer = uploadFinalizer; this.doRemoteDataIntegrityCheck = doRemoteDataIntegrityCheck; this.expectedChecksum = expectedChecksum; + this.uploadRetryEnabled = uploadRetryEnabled; } public String getBucket() { @@ -81,4 +85,8 @@ public boolean doRemoteDataIntegrityCheck() { public Long getExpectedChecksum() { return expectedChecksum; } + + public boolean isUploadRetryEnabled() { + return uploadRetryEnabled; + } } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java index 7c67519f2f3b0..8c7e196d7c812 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java @@ -9,7 +9,10 @@ package org.opensearch.repositories.s3; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; @@ -18,8 +21,10 @@ import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; import software.amazon.awssdk.services.s3.model.DeleteObjectResponse; +import software.amazon.awssdk.services.s3.model.ObjectCannedACL; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.model.StorageClass; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import
software.amazon.awssdk.services.s3.model.UploadPartResponse; @@ -37,6 +42,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.repositories.s3.async.AsyncExecutorContainer; import org.opensearch.repositories.s3.async.AsyncTransferEventLoopGroup; @@ -61,15 +67,21 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.IntStream; +import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class S3BlobContainerMockClientTests extends OpenSearchTestCase implements ConfigPathSupport { @@ -516,7 +528,7 @@ public InputStreamContainer apply(Integer partNo, Long size, Long position) thro } }, partSize, calculateLastPartSize(blobSize, partSize), calculateNumberOfParts(blobSize, partSize)); } - }, blobSize, false, WritePriority.HIGH, uploadSuccess -> { + }, blobSize, false, WritePriority.NORMAL, uploadSuccess -> { assertTrue(uploadSuccess); if (throwExceptionOnFinalizeUpload) { throw new RuntimeException(); @@ -546,4 +558,115 @@ private long calculateLastPartSize(long totalSize, long partSize) { private int calculateNumberOfParts(long contentLength, long partSize) { return (int) ((contentLength % partSize) == 0 ? 
contentLength / partSize : (contentLength / partSize) + 1); } + + public void testFailureWhenLargeFileRedirected() throws IOException, ExecutionException, InterruptedException { + testLargeFilesRedirectedToSlowSyncClient(true); + } + + public void testLargeFileRedirected() throws IOException, ExecutionException, InterruptedException { + testLargeFilesRedirectedToSlowSyncClient(false); + } + + private void testLargeFilesRedirectedToSlowSyncClient(boolean expectException) throws IOException, InterruptedException { + final ByteSizeValue partSize = new ByteSizeValue(1024, ByteSizeUnit.MB); + + int numberOfParts = 20; + final long lastPartSize = new ByteSizeValue(20, ByteSizeUnit.MB).getBytes(); + final long blobSize = ((numberOfParts - 1) * partSize.getBytes()) + lastPartSize; + CountDownLatch countDownLatch = new CountDownLatch(1); + AtomicReference exceptionRef = new AtomicReference<>(); + ActionListener completionListener = ActionListener.wrap(resp -> { countDownLatch.countDown(); }, ex -> { + exceptionRef.set(ex); + countDownLatch.countDown(); + }); + + final String bucketName = randomAlphaOfLengthBetween(1, 10); + + final BlobPath blobPath = new BlobPath(); + if (randomBoolean()) { + IntStream.of(randomIntBetween(1, 5)).forEach(value -> blobPath.add("path_" + value)); + } + + final long bufferSize = ByteSizeUnit.MB.toBytes(randomIntBetween(5, 1024)); + + final S3BlobStore blobStore = mock(S3BlobStore.class); + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.bufferSizeInBytes()).thenReturn(bufferSize); + + final boolean serverSideEncryption = randomBoolean(); + when(blobStore.serverSideEncryption()).thenReturn(serverSideEncryption); + + final StorageClass storageClass = randomFrom(StorageClass.values()); + when(blobStore.getStorageClass()).thenReturn(storageClass); + when(blobStore.isRedirectLargeUploads()).thenReturn(true); + + final ObjectCannedACL cannedAccessControlList = randomBoolean() ? 
randomFrom(ObjectCannedACL.values()) : null; + if (cannedAccessControlList != null) { + when(blobStore.getCannedACL()).thenReturn(cannedAccessControlList); + } + + final S3Client client = mock(S3Client.class); + final AmazonS3Reference clientReference = Mockito.spy(new AmazonS3Reference(client)); + doNothing().when(clientReference).close(); + when(blobStore.clientReference()).thenReturn(clientReference); + final CreateMultipartUploadResponse createMultipartUploadResponse = CreateMultipartUploadResponse.builder() + .uploadId(randomAlphaOfLength(10)) + .build(); + when(client.createMultipartUpload(any(CreateMultipartUploadRequest.class))).thenReturn(createMultipartUploadResponse); + if (expectException) { + when(client.uploadPart(any(UploadPartRequest.class), any(RequestBody.class))).thenThrow( + SdkException.create("Expected upload part request to fail", new RuntimeException()) + ); + } else { + when(client.uploadPart(any(UploadPartRequest.class), any(RequestBody.class))).thenReturn(UploadPartResponse.builder().build()); + } + + // Stub successful responses for the completion and abort requests + when(client.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))).thenReturn( + CompleteMultipartUploadResponse.builder().build() + ); + when(client.abortMultipartUpload(any(AbortMultipartUploadRequest.class))).thenReturn( + AbortMultipartUploadResponse.builder().build() + ); + + List<InputStream> openInputStreams = new ArrayList<>(); + final S3BlobContainer s3BlobContainer = Mockito.spy(new S3BlobContainer(blobPath, blobStore)); + s3BlobContainer.asyncBlobUpload(new WriteContext("write_large_blob", new StreamContextSupplier() { + @Override + public StreamContext supplyStreamContext(long partSize) { + return new StreamContext(new CheckedTriFunction() { + @Override + public InputStreamContainer apply(Integer partNo, Long size, Long position) throws IOException { + InputStream inputStream = new OffsetRangeIndexInputStream(new ZeroIndexInput("desc", blobSize), size, position); + openInputStreams.add(inputStream); + return new InputStreamContainer(inputStream, size, position); + } + }, partSize, calculateLastPartSize(blobSize, partSize), calculateNumberOfParts(blobSize, partSize)); + } + }, blobSize, false, WritePriority.HIGH, uploadSuccess -> { assertTrue(uploadSuccess); }, false, null), completionListener); + + assertTrue(countDownLatch.await(5000, TimeUnit.SECONDS)); + if (expectException) { + assertNotNull(exceptionRef.get()); + } else { + assertNull(exceptionRef.get()); + } + verify(s3BlobContainer, times(1)).executeMultipartUpload(any(S3BlobStore.class), anyString(), any(InputStream.class), anyLong()); + + if (expectException) { + verify(client, times(1)).abortMultipartUpload(any(AbortMultipartUploadRequest.class)); + } else { + verify(client, times(0)).abortMultipartUpload(any(AbortMultipartUploadRequest.class)); + } + + openInputStreams.forEach(inputStream -> { + try { + inputStream.close(); + } catch (IOException ex) { + logger.error("Error closing input stream"); + } + }); + } + } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java index 61c9c998b1dec..f27c8387b6e45 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java @@ -70,10 +70,10 @@ public void testThereIsADefaultClientByDefault() { assertThat(defaultSettings.protocol,
is(Protocol.HTTPS)); assertThat(defaultSettings.proxySettings, is(ProxySettings.NO_PROXY_SETTINGS)); assertThat(defaultSettings.readTimeoutMillis, is(50 * 1000)); - assertThat(defaultSettings.requestTimeoutMillis, is(120 * 1000)); + assertThat(defaultSettings.requestTimeoutMillis, is(5 * 60 * 1000)); assertThat(defaultSettings.connectionTimeoutMillis, is(10 * 1000)); assertThat(defaultSettings.connectionTTLMillis, is(5 * 1000)); - assertThat(defaultSettings.maxConnections, is(100)); + assertThat(defaultSettings.maxConnections, is(500)); assertThat(defaultSettings.maxRetries, is(3)); assertThat(defaultSettings.throttleRetries, is(true)); } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java index 2437547a80a6f..b753b847df869 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java @@ -82,7 +82,7 @@ public void testOneChunkUpload() { s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(1), WritePriority.HIGH, uploadSuccess -> { // do nothing - }, false, null), + }, false, null, true), new StreamContext((partIdx, partSize, position) -> { streamRef.set(new ZeroInputStream(partSize)); return new InputStreamContainer(streamRef.get(), partSize, position); @@ -127,7 +127,7 @@ public void testOneChunkUploadCorruption() { s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(1), WritePriority.HIGH, uploadSuccess -> { // do nothing - }, false, null), + }, false, null, true), new StreamContext( (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position), ByteSizeUnit.MB.toBytes(1), @@ -180,7 +180,7 @@ public void testMultipartUpload() { s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(5), WritePriority.HIGH, uploadSuccess -> { // do nothing - }, true, 3376132981L), + }, true, 3376132981L, true), new StreamContext((partIdx, partSize, position) -> { InputStream stream = new ZeroInputStream(partSize); streams.add(stream); @@ -240,7 +240,7 @@ public void testMultipartUploadCorruption() { s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(5), WritePriority.HIGH, uploadSuccess -> { // do nothing - }, true, 0L), + }, true, 0L, true), new StreamContext( (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position), ByteSizeUnit.MB.toBytes(1), diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index f5c367cb7643b..9be83e30c3183 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -78,6 +78,7 @@ thirdPartyAudit { 'org.conscrypt.ConscryptHostnameVerifier', 'org.openjsse.javax.net.ssl.SSLParameters', 'org.openjsse.javax.net.ssl.SSLSocket', + 'io.opentelemetry.api.events.EventBuilder', 'io.opentelemetry.api.events.EventEmitter', 'io.opentelemetry.api.events.EventEmitterBuilder', 'io.opentelemetry.api.events.EventEmitterProvider', @@ -86,7 +87,8 @@ thirdPartyAudit { 'io.opentelemetry.sdk.autoconfigure.spi.metrics.ConfigurableMetricExporterProvider', 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', 'kotlin.io.path.PathsKt', - 
'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider' + 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', + 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener' ) } diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 deleted file mode 100644 index eae141a8d1a23..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bb24a44d73484c681c236aed84fe6c28d17f30e2 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..2c038aad4b934 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.32.0.jar.sha1 @@ -0,0 +1 @@ +a5c081d8f877225732efe13908f350029c811709 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 deleted file mode 100644 index 6e42973adc581..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b8004737f7a970124e36ac71fde8eb88423e8cee \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..3243f524432eb --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.32.0.jar.sha1 @@ -0,0 +1 @@ +c5f8bb68084ea5709a27e935907b1bb49d0bd049 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 deleted file mode 100644 index b119468e7f88b..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b7b4baf5f9af72d5eb8a231dfb114ae31c57150d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..1d7da47286ae0 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.32.0.jar.sha1 @@ -0,0 +1 @@ +3643061da474061ffa7f2036a58a7a0d40212276 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 deleted file mode 100644 index 8f653922d6418..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -260e5363dad83a0ae65c16ad6a3dd2914e0db201 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..3fab0e47adcbe --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.32.0.jar.sha1 @@ -0,0 +1 @@ +ab56c7223112fac13a66e3f667c5fc666f4a3707 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 
b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 deleted file mode 100644 index 103da4720de96..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b6454464425dfd81519070caeca3824558a2f1ae \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..f93cf7a63bfad --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.32.0.jar.sha1 @@ -0,0 +1 @@ +5752d171cd08ac84f9273258a315bc5f97e1187e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 deleted file mode 100644 index 3db07532ceea9..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d8c22b6851bbc3dbf5d2387b9bde158ed5416ba4 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..2fc33b62aee54 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.32.0.jar.sha1 @@ -0,0 +1 @@ +6b41cd66a385d513b58b6617f20b701435b64abd \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 deleted file mode 100644 index 10d9b7cdfe3e3..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dd209381d58cfe81a989e29c9ca26d97c8dabd7a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..99f758b047aa2 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.32.0.jar.sha1 @@ -0,0 +1 @@ +9346006cead763247a786b5cabf3e1ae3c88eadb \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 deleted file mode 100644 index 162890965a6eb..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c9f5c063309d92b6dd28bff0667f54b63afd36f \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.32.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.32.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..705a342a684c4 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.32.0-alpha.jar.sha1 @@ -0,0 +1 @@ +fab56e187e3fb3c70c18223184d53a76500114ab \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 deleted file mode 100644 index d6ce31a31cc6f..0000000000000 --- 
a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b2093be08a09ac536292bf6cecf8129cc7fb191 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..31818695cc774 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.32.0.jar.sha1 @@ -0,0 +1 @@ +504de8cc7dc68e84c8c7c2757522d934e9c50d35 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 deleted file mode 100644 index 8a6a9705d836d..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f492528288236e097e12fc1c45963dd82c70d33c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..3cf3080a98bd9 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.32.0.jar.sha1 @@ -0,0 +1 @@ +454c7a6afab864de9f0c166246f28f16aaa824c1 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 deleted file mode 100644 index 37d79f5c573f7..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a63a203d3dc6f8875f8c26b9e3b522dc9a3f6280 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..41b0dca07556e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.32.0.jar.sha1 @@ -0,0 +1 @@ +b054760243906af0a327a8f5bd99adc2826ccd88 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 deleted file mode 100644 index 80179e4808f50..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -47cc23762fae728d68e4fda1dfb71986ae0b8b3e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..2f71fd5cc780a --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.32.0.jar.sha1 @@ -0,0 +1 @@ +bff24f085193e105d4e23e3db27bf81ccb3d830e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 deleted file mode 100644 index fd917a58ba77c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3941197cfb8ae9eb9e482073480c0c3918b746c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.32.0.jar.sha1 new file mode 100644 index 0000000000000..f0060b8a0f78f --- /dev/null +++ 
b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.32.0.jar.sha1 @@ -0,0 +1 @@ +d80ad3210fa890a856a1d04379d134ab44a09501 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 deleted file mode 100644 index 77b12c99464f6..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -207660e74d1e155272e9559fd4d27854b92fc6ac \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.23.1-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.23.1-alpha.jar.sha1 new file mode 100644 index 0000000000000..e730c83af905e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.23.1-alpha.jar.sha1 @@ -0,0 +1 @@ +218e361772670212a46be5940010222d68e66f2a \ No newline at end of file diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java index f07f2b308e801..156dc344d1ae2 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java @@ -61,20 +61,22 @@ public void testSanityChecksWhenTracingEnabled() throws Exception { // Create Index and ingest data String indexName = "test-index-11"; - Settings basicSettings = Settings.builder().put("number_of_shards", 3).put("number_of_replicas", 0).build(); + Settings basicSettings = Settings.builder() + .put("number_of_shards", 2) + .put("number_of_replicas", 0) + .put("index.routing.allocation.total_shards_per_node", 1) + .build(); createIndex(indexName, basicSettings); - indexRandom(true, client.prepareIndex(indexName).setId("1").setSource("field1", "the fox jumps in the well")); - indexRandom(true, client.prepareIndex(indexName).setId("1").setSource("field2", "another fox did the same.")); + + indexRandom(false, client.prepareIndex(indexName).setId("1").setSource("field1", "the fox jumps in the well")); + indexRandom(false, client.prepareIndex(indexName).setId("2").setSource("field2", "another fox did the same.")); ensureGreen(); refresh(); // Make the search calls; adding the searchType and PreFilterShardSize to make the query path predictable across all the runs. - client.prepareSearch().setSearchType("query_then_fetch").setPreFilterShardSize(3).setQuery(queryStringQuery("fox")).get(); - client.prepareSearch().setSearchType("query_then_fetch").setPreFilterShardSize(3).setQuery(queryStringQuery("jumps")).get(); - - ensureGreen(); - refresh(); + client.prepareSearch().setSearchType("dfs_query_then_fetch").setPreFilterShardSize(2).setQuery(queryStringQuery("fox")).get(); + client.prepareSearch().setSearchType("dfs_query_then_fetch").setPreFilterShardSize(2).setQuery(queryStringQuery("jumps")).get(); // Sleep for about 3s to wait for traces to be published (the delay is 1s). Thread.sleep(3000); @@ -88,8 +90,10 @@ public void testSanityChecksWhenTracingEnabled() throws Exception { ) ); + // See https://github.com/opensearch-project/OpenSearch/issues/10291: until the local transport is instrumented, + // only the inter-node transport actions are captured.
InMemorySingletonSpanExporter exporter = InMemorySingletonSpanExporter.INSTANCE; - validators.validate(exporter.getFinishedSpanItems(), 6); + validators.validate(exporter.getFinishedSpanItems(), 4); } private static void updateTelemetrySetting(Client client, boolean value) { diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java index f8fe885ee450c..0fb05a08c27bb 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java @@ -10,8 +10,8 @@ import org.opensearch.core.common.Strings; +import java.util.Collection; import java.util.Collections; -import java.util.List; import java.util.Map; import java.util.Optional; import java.util.function.BiConsumer; @@ -51,7 +51,7 @@ private static OTelPropagatedSpan getPropagatedSpan(Context context) { } @Override - public Optional extractFromHeaders(Map> headers) { + public Optional extractFromHeaders(Map> headers) { Context context = openTelemetry.getPropagators().getTextMapPropagator().extract(Context.current(), headers, HEADER_TEXT_MAP_GETTER); return Optional.ofNullable(getPropagatedSpan(context)); } @@ -87,9 +87,9 @@ public String get(Map headers, String key) { } }; - private static final TextMapGetter>> HEADER_TEXT_MAP_GETTER = new TextMapGetter<>() { + private static final TextMapGetter>> HEADER_TEXT_MAP_GETTER = new TextMapGetter<>() { @Override - public Iterable keys(Map> headers) { + public Iterable keys(Map> headers) { if (headers != null) { return headers.keySet(); } else { @@ -98,7 +98,7 @@ public Iterable keys(Map> headers) { } @Override - public String get(Map> headers, String key) { + public String get(Map> headers, String key) { if (headers != null && headers.containsKey(key)) { return Strings.collectionToCommaDelimitedString(headers.get(key)); } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java index 16a3ec9493d5d..d865a329104c1 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java @@ -11,8 +11,8 @@ import org.opensearch.test.OpenSearchTestCase; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; -import java.util.List; import java.util.Map; import io.opentelemetry.api.OpenTelemetry; @@ -57,7 +57,7 @@ public void testExtractTracerContextFromHeader() { } public void testExtractTracerContextFromHttpHeader() { - Map> requestHeaders = new HashMap<>(); + Map> requestHeaders = new HashMap<>(); requestHeaders.put("traceparent", Arrays.asList("00-" + TRACE_ID + "-" + SPAN_ID + "-00")); OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); when(mockOpenTelemetry.getPropagators()).thenReturn(ContextPropagators.create(W3CTraceContextPropagator.getInstance())); diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 deleted file mode 100644 index aaf2e35302d77..0000000000000 --- 
a/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..7bb27bbfcb87d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +a4d94fd6cdf7a37b15237e32434afd6b955cc591 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 deleted file mode 100644 index a77333ea8ae47..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..c792b4e834030 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +642523c57c4be15b8b461be7b1532262d193bbb3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 deleted file mode 100644 index 6f26bf4e6a9b5..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..8bf31f6c3a2c7 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +c648f863b301e3fc62d0de098ec61be7628b8ea2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 deleted file mode 100644 index d2ff72db60d1f..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -847f942381145de23f21c836d05b0677474271d3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..f4fe86799eaff --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +bdb16e4d308b5757123decc886896815d1daf7a3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 deleted file mode 100644 index f12a6046e96d0..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c0acdb8bb73647ebb3847ac2d503d53d72c02b4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..cfc0befee237a --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ 
+7800aea949bf95f63df73166d144e49405b279dd \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 deleted file mode 100644 index 8e4179ba15942..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fe62f9ccd41b8660d07639dbbab8ae1edd6f2720 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..f4425d5ac8e5f --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +5e7eddfa4dfffcbd375d265d1fd08297df94f82f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 deleted file mode 100644 index ab2819da570fd..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6620fbfb47667a5eb6050e35c7b4c88000bcd77f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..fd9199092452b --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +0b36cc2337b4a4135df7ee2f2d77431c3b541dc5 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 deleted file mode 100644 index aaf2e35302d77..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..7bb27bbfcb87d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +a4d94fd6cdf7a37b15237e32434afd6b955cc591 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 deleted file mode 100644 index a77333ea8ae47..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..c792b4e834030 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +642523c57c4be15b8b461be7b1532262d193bbb3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 deleted file mode 100644 index dfa4a0fbea94c..0000000000000 --- 
a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -663b1b7bf3ff0f12fde4df20c72d9e94584ebffa \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..0789dea62778a --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +24d0f4f2b0b0b55acc498d62f9c0921d0ea6da7b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 deleted file mode 100644 index 6f26bf4e6a9b5..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..8bf31f6c3a2c7 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +c648f863b301e3fc62d0de098ec61be7628b8ea2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 deleted file mode 100644 index bf5605151406e..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cbf1a430ea44dbdedbcde16b185cbb95f28d72c7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..48b42bfab9287 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +40590da6c615b852384542051330e9fd51d4c4b1 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 deleted file mode 100644 index d2ff72db60d1f..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -847f942381145de23f21c836d05b0677474271d3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..f4fe86799eaff --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +bdb16e4d308b5757123decc886896815d1daf7a3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 deleted file mode 100644 index f12a6046e96d0..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c0acdb8bb73647ebb3847ac2d503d53d72c02b4 \ No newline at end of file diff --git 
a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..cfc0befee237a --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +7800aea949bf95f63df73166d144e49405b279dd \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 deleted file mode 100644 index 8e4179ba15942..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fe62f9ccd41b8660d07639dbbab8ae1edd6f2720 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..f4425d5ac8e5f --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +5e7eddfa4dfffcbd375d265d1fd08297df94f82f \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 deleted file mode 100644 index 0948daa05fff6..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -62dbdf5f25eda75ea8456be1ed72b3fcb0d18774 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..450c34d975bb9 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +d3f503d7554cf25edc6b83b9b4724b240c6e91d5 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 deleted file mode 100644 index ab2819da570fd..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6620fbfb47667a5eb6050e35c7b4c88000bcd77f \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..fd9199092452b --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +0b36cc2337b4a4135df7ee2f2d77431c3b541dc5 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 deleted file mode 100644 index 30d7758302e37..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -648ff5571022dbfa6789122e3872477bbf67fa7b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 
b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 new file mode 100644 index 0000000000000..291c87ad7987b --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 @@ -0,0 +1 @@ +d0697ef1d71c6111a1b18a387fa985fd6398ace2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 deleted file mode 100644 index 352d69396d0c9..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -378dc5a375e6440099e837b22cf4b01341cbe4ea \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.13.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.13.jar.sha1 new file mode 100644 index 0000000000000..5eaf96739ed72 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.13.jar.sha1 @@ -0,0 +1 @@ +faea23e582978a34f6a932b81e86206ec2314990 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 deleted file mode 100644 index 1bcb0e0c52950..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e839fadb8f45d8a7a2783466faedd03373366c23 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.13.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.13.jar.sha1 new file mode 100644 index 0000000000000..091125169c696 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.13.jar.sha1 @@ -0,0 +1 @@ +c5af7bc746050d080891a5446cca2c96a0c51d03 \ No newline at end of file diff --git a/release-notes/opensearch.release-notes-2.11.1.md b/release-notes/opensearch.release-notes-2.11.1.md new file mode 100644 index 0000000000000..06613558de177 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.11.1.md @@ -0,0 +1,10 @@ +## 2023-11-20 Version 2.11.1 Release Notes + +## [2.11.1] + +### Changed +- Use iterative approach to evaluate Regex.simpleMatch ([#11060](https://github.com/opensearch-project/OpenSearch/pull/11060)) + +### Fixed +- [BUG] Disable sort optimization for HALF_FLOAT ([#10999](https://github.com/opensearch-project/OpenSearch/pull/10999)) +- Add version condition while adding geoshape doc values to the index, to ensure backward compatibility. ([#11095](https://github.com/opensearch-project/OpenSearch/pull/11095)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml index b9089689b0cf1..3b7ea15164e9f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml @@ -156,3 +156,23 @@ query: {"range": { "rank": { "from": 0 } } } track_total_hits: false size: 3 + +--- +"Index Sort half float": + - do: + catch: bad_request + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + index.sort.field: rank + mappings: + properties: + rank: + type: half_float + + # This should fail with 400 as half_float is not supported for
index sort + - match: { status: 400 } + - match: { error.type: illegal_argument_exception } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml index 3b16cdb13a22f..e7da9a0bc454c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml @@ -598,7 +598,6 @@ setup: - match: { aggregations.histo.buckets.0.doc_count: 2 } - match: { profile.shards.0.aggregations.0.type: DateHistogramAggregator } - match: { profile.shards.0.aggregations.0.description: histo } - - match: { profile.shards.0.aggregations.0.breakdown.collect_count: 4 } - match: { profile.shards.0.aggregations.0.debug.total_buckets: 3 } --- diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml index 1ddba45c97c72..fb5c3268a3c82 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml @@ -164,3 +164,99 @@ setup: - match: { aggregations.date.buckets.1.key: 1540857600000 } - match: { aggregations.date.buckets.1.key_as_string: "2018-10-30T00:00:00.000Z" } - match: { aggregations.date.buckets.1.doc_count: 2 } + + +--- +"date with nested sort now": + - skip: + version: " - 2.11.99" + reason: fixed in 2.12.0 + + # These tests cover the scenario where a nested sort uses now() in a date field type. + # Here the date field is a nested field, and we trigger an asc/desc sort on it. + # A `filter` clause is needed when sorting on any nested field; in this case, + # "gte": "now/h" means the nested field date_field is sorted only for documents + # whose value is greater than the current time now(). + # A nested field sort query does not sort documents that are not qualified through + # the `filter` clause.
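+ # As a minimal illustrative sketch only (not part of the original assertions; the + # field names are the ones this test defines, and the bool wrapper used below is + # omitted), a qualified nested sort has the shape: + # sort: [{ nested_field.date_field: { order: desc, nested: { path: nested_field, + # filter: { range: { nested_field.date_field: { gte: now/h } } } } } }]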
+ # Only adding tests for `gte`, as `lte` would be the same behaviour + + - do: + indices.create: + index: test + body: + mappings: + properties: + nested_field: + type: nested + properties: + date_field: + type: date + format: date_optional_time + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"nested_field": [{"date_field": "3023-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "3024-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "3025-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "3026-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "3027-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "2022-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "2023-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "2021-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "2020-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "2019-10-26T12:00:00+09:00"}]} + + # gte: now/h with the desc sort + - do: + search: + index: test + body: + size: 5 + sort: [{ nested_field.date_field: { mode: max, order: desc, nested: { path: nested_field, filter: { bool: { filter : [{ range : { nested_field.date_field: { gte: now/h, time_zone: +09:00} } }] } } } } } ] + - match: {hits.total.value: 10 } + - length: {hits.hits: 5 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.nested_field.0.date_field: "3027-10-26T12:00:00+09:00" } + - match: { hits.hits.0.sort: [33381428400000] } + - match: { hits.hits.1._source.nested_field.0.date_field: "3026-10-26T12:00:00+09:00" } + - match: { hits.hits.1.sort: [ 33349892400000 ] } + - match: { hits.hits.2._source.nested_field.0.date_field: "3025-10-26T12:00:00+09:00" } + - match: { hits.hits.2.sort: [ 33318356400000 ] } + - match: { hits.hits.3._source.nested_field.0.date_field: "3024-10-26T12:00:00+09:00" } + - match: { hits.hits.3.sort: [ 33286820400000 ] } + - match: { hits.hits.4._source.nested_field.0.date_field: "3023-10-26T12:00:00+09:00" } + - match: { hits.hits.4.sort: [ 33255198000000 ] } + + # gte: now/h with the asc sort + - do: + search: + index: test + body: + size: 5 + sort: [ { nested_field.date_field: { mode: max, order: asc, nested: { path: nested_field, filter: { bool: { filter: [ { range: { nested_field.date_field: { gte: now/h, time_zone: +09:00 } } } ] } } } } } ] + - match: { hits.total.value: 10 } + - length: { hits.hits: 5 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.nested_field.0.date_field: "3023-10-26T12:00:00+09:00" } + - match: { hits.hits.0.sort: [ 33255198000000 ] } + - match: { hits.hits.1._source.nested_field.0.date_field: "3024-10-26T12:00:00+09:00" } + - match: { hits.hits.1.sort: [ 33286820400000 ] } + - match: { hits.hits.2._source.nested_field.0.date_field: "3025-10-26T12:00:00+09:00" } + - match: { hits.hits.2.sort: [ 33318356400000 ] } + - match: { hits.hits.3._source.nested_field.0.date_field: "3026-10-26T12:00:00+09:00" } + - match: { hits.hits.3.sort: [ 33349892400000 ] } + - match: { hits.hits.4._source.nested_field.0.date_field: "3027-10-26T12:00:00+09:00" } + - match: { hits.hits.4.sort: [ 33381428400000 ] } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml index ba2b18eb3b6d0..7385eef051243 100644 ---
a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml @@ -20,6 +20,7 @@ properties: counter: type: double + - do: bulk: refresh: true @@ -119,3 +120,87 @@ - match: { status: 400 } - match: { error.type: search_phase_execution_exception } - match: { error.caused_by.reason: "Can't do sort across indices, as a field has [unsigned_long] type in one index, and different type in another index!" } + +--- +"search across indices with mixed long and double and float numeric types": + - skip: + version: " - 2.11.0" + reason: half float was broken before 2.11.1 + + - do: + indices.create: + index: test_1 + body: + mappings: + properties: + counter: + type: long + + - do: + indices.create: + index: test_2 + body: + mappings: + properties: + counter: + type: double + + - do: + indices.create: + index: test_3 + body: + mappings: + properties: + counter: + type: half_float + + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + - counter: 223372036854775800 + - index: + _index: test_2 + - counter: 1223372036854775800.23 + - index: + _index: test_2 + - counter: 184.4 + - index: + _index: test_3 + - counter: 187.4 + - index: + _index: test_3 + - counter: 194.4 + + - do: + search: + index: test_* + rest_total_hits_as_int: true + body: + sort: [{ counter: desc }] + - match: { hits.total: 5 } + - length: { hits.hits: 5 } + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.0._source.counter: 1223372036854775800.23 } + - match: { hits.hits.0.sort.0: 1223372036854775800.23 } + - match: { hits.hits.1._index: test_1 } + - match: { hits.hits.1._source.counter: 223372036854775800 } + - match: { hits.hits.1.sort.0: 223372036854775800 } + - match: { hits.hits.2._index: test_3 } + - match: { hits.hits.2._source.counter: 194.4 } + + - do: + search: + index: test_* + rest_total_hits_as_int: true + body: + sort: [{ counter: asc }] + - match: { hits.total: 5 } + - length: { hits.hits: 5 } + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.0._source.counter: 184.4 } + - match: { hits.hits.0.sort.0: 184.4 } + - match: { hits.hits.1._index: test_3 } + - match: { hits.hits.1._source.counter: 187.4 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml index 55e1566656faf..ebecb63dedbaf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml @@ -219,7 +219,7 @@ - match: {hits.hits.0._source.timestamp: "2019-10-21 08:30:04.828733" } - match: {hits.hits.0.sort: [1571646604828733000] } - # search_after with the sort + # search_after with the asc sort - do: search: index: test @@ -320,3 +320,468 @@ - length: { hits.hits: 1 } - match: { hits.hits.0._index: test } - match: { hits.hits.0._source.population: null } + +--- +"half float": + - skip: + version: " - 2.11.0" + reason: half_float was broken for 2.11.0 and earlier + + - do: + indices.create: + index: test + body: + mappings: + properties: + population: + type: half_float + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"population": 184.4} + {"index":{}} + {"population": 194.4} + {"index":{}} + {"population": 144.4} + {"index":{}} + {"population": 174.4} + {"index":{}} + {"population": 164.4} + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + 
size: 3 + sort: [ { population: desc } ] + - match: { hits.total: 5 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: 194.4 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.population: 184.4 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.population: 174.4 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + size: 3 + sort: [ { population: asc } ] + - match: { hits.total: 5 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: 144.4 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.population: 164.4 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.population: 174.4 } + + # search_after with the asc sort + - do: + search: + index: test + rest_total_hits_as_int: true + body: + size: 1 + sort: [ { population: asc } ] + search_after: [ 184.375 ] # this is the rounded sort value from the sort result + - match: { hits.total: 5 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: 194.4 } + + # search_after with the desc sort + - do: + search: + index: test + rest_total_hits_as_int: true + body: + size: 1 + sort: [ { population: desc } ] + search_after: [ 164.375 ] # this is the rounded sort value from the sort result + - match: { hits.total: 5 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: 144.4 } + + # search_after with the asc sort with missing + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"population": null} + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 5 + "sort": [ { "population": { "order": "asc", "missing": "_last" } } ] + search_after: [ 200 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 6 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: null } + + # search_after with the desc sort with missing + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 5 + "sort": [ { "population": { "order": "desc", "missing": "_last" } } ] + search_after: [ 100 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 6 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: null } + +--- +"numeric skipping logic with competitive missing value": + - skip: + version: " - 2.12.99" + reason: newly added test, supported from 3.0.0 + +# This test checks that the skipping logic is bypassed when missing values are competitive, +# for all numeric types: int, long, float, half_float, double and unsigned_long. +# We insert 24 documents, some with missing values, and pass the missing value as the +# search_after parameter. The secondary sort field is id, which has no missing values. +# If Lucene applied its skipping logic, it would skip all documents whose primary sort +# field is missing, even though those documents should still be returned, sorted by the +# secondary field id. +# This test addresses bugs like https://github.com/opensearch-project/OpenSearch/issues/9537 + + - do: + indices.create: + index: test + body: + mappings: + properties: + halffloat: + type: half_float + long: + type: long + int: + type: integer + float: + type: float + double: + type: double + unsignedlong: + type: unsigned_long + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"id": 1, "halffloat": 1, "long": 1, "int": 1, "float": 1, "double": 1, "unsignedlong": 1} + {"index":{}} + {"id": 2, "halffloat": 2, "long": 2, "int": 2, "float": 2, "double": 2, "unsignedlong": 2} + {"index":{}} + {"id": 3, "halffloat": 3, "long": 3, "int": 3, "float": 3, "double": 3, "unsignedlong": 3} + {"index":{}} + {"id": 4, "halffloat": 4, "long": 4, "int": 4, "float": 4, "double": 4, "unsignedlong": 4} + {"index":{}} + {"id": 5, "halffloat": 5, "long": 5, "int": 5, "float": 5, "double": 5, "unsignedlong": 5} + {"index":{}} + {"id": 6, "halffloat": 6, "long": 6, "int": 6, "float": 6, "double": 6, "unsignedlong": 6} + {"index":{}} + {"id": 7, "halffloat": 7, "long": 7, "int": 7, "float": 7, "double": 7, "unsignedlong": 7} + {"index":{}} + {"id": 8, "halffloat": 8, "long": 8, "int": 8, "float": 8, "double": 8, "unsignedlong": 8} + {"index":{}} + {"id": 9, "halffloat": 9, "long": 9, "int": 9, "float": 9, "double": 9, "unsignedlong": 9} + {"index":{}} + {"id": 10, "halffloat": 10, "long": 10, "int": 10, "float": 10, "double": 10, "unsignedlong": 10} + {"index":{}} + {"id": 11, "halffloat": 11, "long": 11, "int": 11, "float": 11, "double": 11, "unsignedlong": 11} + {"index":{}} + {"id": 12, "halffloat": 12, "long": 12, "int": 12, "float": 12, "double": 12, "unsignedlong": 12} + {"index":{}} + {"id": 13, "halffloat": 13, "long": 13, "int": 13, "float": 13, "double": 13, "unsignedlong": 13} + {"index":{}} + {"id": 14, "halffloat": 14, "long": 14, "int": 14, "float": 14, "double": 14, "unsignedlong": 14} + {"index":{}} + {"id": 15, "halffloat": 15, "long": 15, "int": 15, "float": 15, "double": 15, "unsignedlong": 15} + {"index":{}} + {"id": 16, "halffloat": 16, "long": 16, "int": 16, "float": 16, "double": 16, "unsignedlong": 16} + {"index":{}} + {"id": 17, "halffloat": 17, "long": 17, "int": 17, "float": 17, "double": 17, "unsignedlong": 17} + {"index":{}} + {"id": 18, "halffloat": 18, "long": 18, "int": 18, "float": 18, "double": 18, "unsignedlong": 18} + {"index":{}} + {"id": 19, "halffloat": 19, "long": 19, "int": 19, "float": 19, "double": 19, "unsignedlong": 19} + {"index":{}} + {"id": 20, "halffloat": 20, "long": 20, "int": 20, "float": 20, "double": 20, "unsignedlong": 20} + {"index":{}} + {"id": 21, "halffloat": null, "long": null, "int": null, "float": null, "double": null, "unsignedlong": null} + {"index":{}} + {"id": 22, "halffloat": null, "long": null, "int": null, "float": null, "double": null, "unsignedlong": null} + {"index":{}} + {"id": 23, "halffloat": null, "long": null, "int": null, "float": null, "double": null, "unsignedlong": null} + {"index":{}} + {"id": 24, "halffloat": null, "long": null, "int": null, "float": null, "double": null, "unsignedlong": null} + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "halffloat": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.halffloat: null } +
- match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.halffloat: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.halffloat: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "halffloat": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.halffloat: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.halffloat: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.halffloat: null } + - match: { hits.hits.2._source.id: 22 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "long": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.long: null } + - match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.long: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.long: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "long": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.long: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.long: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.long: null } + - match: { hits.hits.2._source.id: 22 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "int": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.int: null } + - match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.int: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.int: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "int": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.int: null } + - match: { hits.hits.0._source.id: 24 
} + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.int: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.int: null } + - match: { hits.hits.2._source.id: 22 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "float": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.float: null } + - match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.float: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.float: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "float": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.float: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.float: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.float: null } + - match: { hits.hits.2._source.id: 22 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "double": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.double: null } + - match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.double: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.double: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "double": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.double: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.double: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.double: null } + - match: { hits.hits.2._source.id: 22 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "unsignedlong": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.unsignedlong: null } + - match: { hits.hits.0._source.id: 21 } + - match: { 
hits.hits.1._index: test } + - match: { hits.hits.1._source.unsignedlong: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.unsignedlong: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "unsignedlong": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.unsignedlong: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.unsignedlong: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.unsignedlong: null } + - match: { hits.hits.2._source.id: 22 } diff --git a/server/build.gradle b/server/build.gradle index fa8a44ef6fc94..bf0c7791aeb55 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -47,7 +47,7 @@ publishing { } base { - archivesBaseName = 'opensearch' + archivesName = 'opensearch' } sourceSets { @@ -147,6 +147,7 @@ dependencies { api "org.apache.logging.log4j:log4j-jul:${versions.log4j}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional annotationProcessor "org.apache.logging.log4j:log4j-core:${versions.log4j}" + annotationProcessor project(':libs:opensearch-common') // jna api "net.java.dev.jna:jna:${versions.jna}" @@ -178,7 +179,8 @@ tasks.withType(JavaCompile).configureEach { } compileJava { - options.compilerArgs += ['-processor', 'org.apache.logging.log4j.core.config.plugins.processor.PluginProcessor'] + options.compilerArgs += ['-processor', ['org.apache.logging.log4j.core.config.plugins.processor.PluginProcessor', + 'org.opensearch.common.annotation.processor.ApiAnnotationProcessor'].join(','), '-AcontinueOnFailingChecks'] } tasks.named("internalClusterTest").configure { @@ -403,6 +405,7 @@ tasks.named("licenseHeaders").configure { } tasks.test { + environment "node.roles.test", "[]" if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_1_8) { jvmArgs += ["--add-opens", "java.base/java.nio.file=ALL-UNNAMED"] } diff --git a/server/cli/build.gradle b/server/cli/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/server/cli/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. 
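+// (An assumption inferred from the link below, not stated in this change itself: +// Gradle 8.5 deprecates referencing a project whose directory is missing, and this +// placeholder build file keeps the server/cli project directory present.)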
+ +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java index a7354dddfd16d..6275571cc2371 100644 --- a/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java @@ -47,6 +47,7 @@ import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; +import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexNotFoundException; @@ -491,10 +492,20 @@ public void testAddBlockWhileDeletingIndices() throws Exception { } catch (InterruptedException e) { throw new AssertionError(e); } - try { - assertAcked(client().admin().indices().prepareDelete(indexToDelete)); - } catch (final Exception e) { - exceptionConsumer.accept(e); + int pendingRetries = 3; + boolean success = false; + while (success == false && pendingRetries-- > 0) { + try { + assertAcked(client().admin().indices().prepareDelete(indexToDelete)); + success = true; + } catch (final Exception e) { + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof ProcessClusterEventTimeoutException && pendingRetries > 0) { + // ignore error & retry + continue; + } + exceptionConsumer.accept(e); + } } })); } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java index 54824b67b7abc..25fa7ae7eb8eb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java @@ -35,23 +35,27 @@ import org.opensearch.action.admin.indices.get.GetIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.index.Index; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.indices.IndicesService; +import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Before; +import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.concurrent.ExecutionException; import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class ClusterIndexRefreshIntervalIT extends OpenSearchIntegTestCase { +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2) +public class ClusterIndexRefreshIntervalIT extends 
AbstractSnapshotIntegTestCase { public static final String INDEX_NAME = "test-index"; @@ -69,9 +73,39 @@ public void setUp() throws Exception { internalCluster().startClusterManagerOnlyNode(); } + static void putIndexTemplate(String refreshInterval) { + PutIndexTemplateRequest request = new PutIndexTemplateRequest("my-template"); // <1> + request.patterns(Arrays.asList("pattern-1", "log-*")); // <2> + + request.settings( + Settings.builder() // <1> + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1) + .put("index.refresh_interval", refreshInterval) + ); + assertTrue(client().admin().indices().putTemplate(request).actionGet().isAcknowledged()); + } + + public void testIndexTemplateCreationSucceedsWhenNoMinimumRefreshInterval() throws ExecutionException, InterruptedException { + String clusterManagerName = internalCluster().getClusterManagerName(); + List dataNodes = new ArrayList<>(internalCluster().getDataNodeNames()); + putIndexTemplate("2s"); + + // Test index creation using template with valid refresh interval + String indexName = "log-myindex-1"; + createIndex(indexName); + ensureYellowAndNoInitializingShards(indexName); + ensureGreen(indexName); + GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get(); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, randomFrom(dataNodes)); + String uuid = getIndexResponse.getSettings().get(indexName).get(IndexMetadata.SETTING_INDEX_UUID); + IndexService indexService = indicesService.indexService(new Index(indexName, uuid)); + assertEquals(TimeValue.timeValueSeconds(2), indexService.getRefreshTaskInterval()); + } + public void testDefaultRefreshIntervalWithUpdateClusterAndIndexSettings() throws Exception { String clusterManagerName = internalCluster().getClusterManagerName(); - List dataNodes = internalCluster().startDataOnlyNodes(2); + List dataNodes = new ArrayList<>(internalCluster().getDataNodeNames()); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -90,7 +124,7 @@ public void testDefaultRefreshIntervalWithUpdateClusterAndIndexSettings() throws .get(); assertEquals(refreshInterval, indexService.getRefreshTaskInterval()); - // Update of cluster.minimum.index.refresh_interval setting to value less than refreshInterval above will fail + // Update of cluster.minimum.index.refresh_interval setting to value more than default refreshInterval above will fail TimeValue invalidMinimumRefreshInterval = TimeValue.timeValueMillis(refreshInterval.millis() + randomIntBetween(1, 1000)); IllegalArgumentException exceptionDuringMinUpdate = assertThrows( IllegalArgumentException.class, @@ -205,7 +239,7 @@ public void testRefreshIntervalDisabled() throws ExecutionException, Interrupted .getAsTime(IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.MINUS_ONE); boolean createIndexSuccess = clusterMinimumRefreshInterval.equals(TimeValue.MINUS_ONE); String clusterManagerName = internalCluster().getClusterManagerName(); - List dataNodes = internalCluster().startDataOnlyNodes(2); + List dataNodes = new ArrayList<>(internalCluster().getDataNodeNames()); Settings settings = Settings.builder() .put(indexSettings()) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), IndexSettings.MINIMUM_REFRESH_INTERVAL) @@ -236,7 +270,7 @@ protected TimeValue getMinRefreshIntervalForRefreshDisabled() { public void testInvalidRefreshInterval() { String invalidRefreshInterval = 
"-10s"; - internalCluster().startDataOnlyNodes(2); + List dataNodes = new ArrayList<>(internalCluster().getDataNodeNames()); Settings settings = Settings.builder() .put(indexSettings()) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), invalidRefreshInterval) @@ -251,7 +285,7 @@ public void testInvalidRefreshInterval() { } public void testCreateIndexWithExplicitNullRefreshInterval() throws ExecutionException, InterruptedException { - List dataNodes = internalCluster().startDataOnlyNodes(2); + List dataNodes = new ArrayList<>(internalCluster().getDataNodeNames()); Settings indexSettings = Settings.builder() .put(indexSettings()) .putNull(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey()) @@ -278,7 +312,7 @@ public void testCreateIndexWithExplicitNullRefreshInterval() throws ExecutionExc * the index setting. The underlying index should continue to use the same refresh interval as earlier. */ public void testClusterMinimumChangeOnIndexWithCustomRefreshInterval() throws ExecutionException, InterruptedException { - List dataNodes = internalCluster().startDataOnlyNodes(2); + List dataNodes = new ArrayList<>(internalCluster().getDataNodeNames()); TimeValue customRefreshInterval = TimeValue.timeValueSeconds(getDefaultRefreshInterval().getSeconds() + randomIntBetween(1, 5)); Settings indexSettings = Settings.builder() .put(indexSettings()) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalWithNodeSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalWithNodeSettingsIT.java index 5fc7bfcbcd442..2817d0e6a5951 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalWithNodeSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalWithNodeSettingsIT.java @@ -8,9 +8,27 @@ package org.opensearch.cluster.metadata; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexResponse; +import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.indices.IndicesService; +import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.snapshots.SnapshotState; + +import java.util.Locale; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; +import static org.opensearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertIndexTemplateExists; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertIndexTemplateMissing; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class ClusterIndexRefreshIntervalWithNodeSettingsIT extends ClusterIndexRefreshIntervalIT { @@ -26,6 +44,166 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } + public void testIndexTemplateCreationFailsWithLessThanMinimumRefreshInterval() throws ExecutionException, InterruptedException { + 
Throwable throwable = assertThrows(IllegalArgumentException.class, () -> putIndexTemplate("0s")); + assertEquals( + throwable.getMessage(), + String.format( + Locale.ROOT, + "invalid index.refresh_interval [%s]: cannot be smaller than cluster.minimum.index.refresh_interval [%s]", + "0s", + getMinRefreshIntervalForRefreshDisabled() + ) + ); + } + + public void testIndexTemplateSnapshotRestoreWithLessThanMinimumRefreshInterval() throws ExecutionException, InterruptedException { + putIndexTemplate("2s"); + createRepository("test-repo", "fs"); + + final SnapshotInfo snapshotInfo = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") + .setIndices() + .setWaitForCompletion(true) + .execute() + .get() + .getSnapshotInfo(); + assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); + + assertThat(snapshotInfo.totalShards(), equalTo(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(0)); + + assertThat(client().admin().indices().prepareDeleteTemplate("my-template").get().isAcknowledged(), equalTo(true)); + + GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); + assertIndexTemplateMissing(getIndexTemplatesResponse, "my-template"); + + String clusterManagerName = internalCluster().getClusterManagerName(); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "5s") + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "4s") + ) + .get(); + + logger.info("--> try restore cluster state -- should fail"); + Throwable throwable = assertThrows( + IllegalArgumentException.class, + () -> clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setRestoreGlobalState(true) + .execute() + .actionGet() + ); + assertEquals( + throwable.getMessage(), + "invalid index.refresh_interval [2s]: cannot be smaller than cluster.minimum.index.refresh_interval [4s]" + ); + + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "5s") + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") + ) + .get(); + + logger.info("--> restore cluster state"); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setRestoreGlobalState(true) + .execute() + .actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); + + getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); + assertIndexTemplateExists(getIndexTemplatesResponse, "my-template"); + + } + + public void testIndexSnapshotRestoreWithLessThanMinimumRefreshInterval() throws ExecutionException, InterruptedException { + createIndex( + "my-index", + Settings.builder() + .put(indexSettings()) + .put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), "2s") + .build() + ); + + createRepository("test-repo", "fs"); + + final SnapshotInfo snapshotInfo = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") + .setIndices() + .setWaitForCompletion(true) + .execute() + .get() + .getSnapshotInfo(); + assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); + + assertThat(snapshotInfo.totalShards(), equalTo(1)); + assertThat(snapshotInfo.successfulShards(), equalTo(1)); + + 
GetIndexResponse getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest().indices("my-index")).get(); + assertEquals(1, getIndexResponse.indices().length); + assertEquals("2s", getIndexResponse.getSetting("my-index", INDEX_REFRESH_INTERVAL_SETTING.getKey())); + + assertThat(client().admin().indices().prepareDelete("my-index").get().isAcknowledged(), equalTo(true)); + + getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest()).get(); + assertEquals(getIndexResponse.indices().length, 0); + + String clusterManagerName = internalCluster().getClusterManagerName(); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "5s") + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "4s") + ) + .get(); + + logger.info("--> try restore cluster state -- should fail"); + Throwable throwable = assertThrows( + IllegalArgumentException.class, + () -> clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setRestoreGlobalState(true) + .execute() + .actionGet() + ); + assertEquals( + throwable.getMessage(), + "invalid index.refresh_interval [2s]: cannot be smaller than cluster.minimum.index.refresh_interval [4s]" + ); + + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "5s") + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") + ) + .get(); + + logger.info("--> try restore cluster state -- should pass"); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setRestoreGlobalState(true) + .execute() + .actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(1)); + + getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest().indices("my-index")).get(); + assertEquals(getIndexResponse.indices().length, 1); + } + @Override protected TimeValue getMinRefreshIntervalForRefreshDisabled() { return TimeValue.timeValueSeconds(1); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/engine/MaxDocsLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/index/engine/MaxDocsLimitIT.java index 385d33c359559..8321630d34229 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/engine/MaxDocsLimitIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/engine/MaxDocsLimitIT.java @@ -36,6 +36,8 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; @@ -65,6 +67,7 @@ public class MaxDocsLimitIT extends OpenSearchIntegTestCase { private static final AtomicInteger maxDocs = new AtomicInteger(); + private static final ShardId shardId = new ShardId(new Index("test", "_na_"), 0); public static class TestEnginePlugin extends Plugin implements EnginePlugin { @Override @@ -122,7 +125,10 @@ public void testMaxDocsLimit() throws Exception { IllegalArgumentException.class, () -> client().prepareDelete("test", "any-id").get() ); - 
assertThat(deleteError.getMessage(), containsString("Number of documents in the index can't exceed [" + maxDocs.get() + "]")); + assertThat( + deleteError.getMessage(), + containsString("Number of documents in shard " + shardId + " exceeds the limit of [" + maxDocs.get() + "] documents per shard") + ); client().admin().indices().prepareRefresh("test").get(); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(new MatchAllQueryBuilder()) @@ -208,7 +214,16 @@ static IndexingResult indexDocs(int numRequests, int numThreads) throws Exceptio assertThat(resp.status(), equalTo(RestStatus.CREATED)); } catch (IllegalArgumentException e) { numFailure.incrementAndGet(); - assertThat(e.getMessage(), containsString("Number of documents in the index can't exceed [" + maxDocs.get() + "]")); + assertThat( + e.getMessage(), + containsString( + "Number of documents in shard " + + shardId + + " exceeds the limit of [" + + maxDocs.get() + + "] documents per shard" + ) + ); } } }); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 9c93a8f85db8e..5511bc7945d65 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -83,6 +83,7 @@ import org.opensearch.test.BackgroundIndexer; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; import org.junit.Before; @@ -238,7 +239,7 @@ public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { createIndex(INDEX_NAME, settings); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(0, 200); + final int initialDocCount = scaledRandomIntBetween(0, 10); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -255,7 +256,7 @@ public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { refresh(INDEX_NAME); waitForSearchableDocs(initialDocCount, nodeA, nodeB); - final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int additionalDocCount = scaledRandomIntBetween(0, 10); final int expectedHitCount = initialDocCount + additionalDocCount; indexer.start(additionalDocCount); waitForDocs(expectedHitCount, indexer); @@ -274,7 +275,7 @@ public void testIndexReopenClose() throws Exception { createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(100, 200); + final int initialDocCount = scaledRandomIntBetween(1, 10); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -309,7 +310,7 @@ public void testScrollWithConcurrentIndexAndSearch() throws Exception { ensureGreen(INDEX_NAME); final List> pendingIndexResponses = new ArrayList<>(); final List> pendingSearchResponse = new ArrayList<>(); - final int searchCount = randomIntBetween(10, 20); + final int searchCount = randomIntBetween(1, 2); final WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values()); for (int i = 0; i < searchCount; i++) { @@ -354,6 +355,7 @@ public void testScrollWithConcurrentIndexAndSearch() throws Exception { waitForSearchableDocs(INDEX_NAME, 2 * searchCount, List.of(primary, 
replica)); } + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testMultipleShards() throws Exception { Settings indexSettings = Settings.builder() .put(super.indexSettings()) @@ -367,7 +369,7 @@ public void testMultipleShards() throws Exception { createIndex(INDEX_NAME, indexSettings); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(1, 200); + final int initialDocCount = scaledRandomIntBetween(1, 10); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -384,7 +386,7 @@ public void testMultipleShards() throws Exception { refresh(INDEX_NAME); waitForSearchableDocs(initialDocCount, nodeA, nodeB); - final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int additionalDocCount = scaledRandomIntBetween(0, 10); final int expectedHitCount = initialDocCount + additionalDocCount; indexer.start(additionalDocCount); waitForDocs(expectedHitCount, indexer); @@ -403,8 +405,8 @@ public void testReplicationAfterForceMerge() throws Exception { createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(0, 200); - final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int initialDocCount = scaledRandomIntBetween(0, 10); + final int additionalDocCount = scaledRandomIntBetween(0, 10); final int expectedHitCount = initialDocCount + additionalDocCount; try ( BackgroundIndexer indexer = new BackgroundIndexer( @@ -509,7 +511,7 @@ public void testNodeDropWithOngoingReplication() throws Exception { connection.sendRequest(requestId, action, request, options); } ); - final int docCount = scaledRandomIntBetween(10, 200); + final int docCount = scaledRandomIntBetween(1, 10); for (int i = 0; i < docCount; i++) { client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); } @@ -570,7 +572,7 @@ public void testCancellation() throws Exception { } ); - final int docCount = scaledRandomIntBetween(0, 200); + final int docCount = scaledRandomIntBetween(0, 10); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -630,13 +632,14 @@ public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { verifyStoreContent(); } + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testDeleteOperations() throws Exception { final String nodeA = internalCluster().startDataOnlyNode(); final String nodeB = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(1, 20); + final int initialDocCount = scaledRandomIntBetween(1, 5); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -653,7 +656,7 @@ public void testDeleteOperations() throws Exception { refresh(INDEX_NAME); waitForSearchableDocs(initialDocCount, nodeA, nodeB); - final int additionalDocCount = scaledRandomIntBetween(0, 20); + final int additionalDocCount = scaledRandomIntBetween(0, 2); final int expectedHitCount = initialDocCount + additionalDocCount; indexer.start(additionalDocCount); waitForDocs(expectedHitCount, indexer); @@ -682,14 +685,14 @@ public void testReplicationPostDeleteAndForceMerge() throws Exception { createIndex(INDEX_NAME); final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(10, 200); + final int initialDocCount 
= scaledRandomIntBetween(1, 10); for (int i = 0; i < initialDocCount; i++) { client().prepareIndex(INDEX_NAME).setId(String.valueOf(i)).setSource("foo", "bar").get(); } refresh(INDEX_NAME); waitForSearchableDocs(initialDocCount, primary, replica); - final int deletedDocCount = randomIntBetween(10, initialDocCount); + final int deletedDocCount = randomIntBetween(1, initialDocCount); for (int i = 0; i < deletedDocCount; i++) { client(primary).prepareDelete(INDEX_NAME, String.valueOf(i)).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); } @@ -710,7 +713,7 @@ public void testReplicationPostDeleteAndForceMerge() throws Exception { ); // add some docs to the xlog and drop primary. - final int additionalDocs = randomIntBetween(1, 50); + final int additionalDocs = randomIntBetween(1, 5); for (int i = initialDocCount; i < initialDocCount + additionalDocs; i++) { client().prepareIndex(INDEX_NAME).setId(String.valueOf(i)).setSource("foo", "bar").get(); } @@ -741,7 +744,7 @@ public void testUpdateOperations() throws Exception { final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(0, 200); + final int initialDocCount = scaledRandomIntBetween(1, 5); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -758,7 +761,7 @@ public void testUpdateOperations() throws Exception { refresh(INDEX_NAME); waitForSearchableDocs(initialDocCount, asList(primary, replica)); - final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int additionalDocCount = scaledRandomIntBetween(0, 5); final int expectedHitCount = initialDocCount + additionalDocCount; indexer.start(additionalDocCount); waitForDocs(expectedHitCount, indexer); @@ -793,7 +796,7 @@ public void testDropPrimaryDuringReplication() throws Exception { final List dataNodes = internalCluster().startDataOnlyNodes(6); ensureGreen(INDEX_NAME); - int initialDocCount = scaledRandomIntBetween(100, 200); + int initialDocCount = scaledRandomIntBetween(5, 10); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -827,6 +830,7 @@ public void testDropPrimaryDuringReplication() throws Exception { } } + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testReplicaHasDiffFilesThanPrimary() throws Exception { final String primaryNode = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); @@ -838,7 +842,7 @@ public void testReplicaHasDiffFilesThanPrimary() throws Exception { IndexWriterConfig iwc = newIndexWriterConfig().setOpenMode(IndexWriterConfig.OpenMode.APPEND); // create a doc to index - int numDocs = 2 + random().nextInt(100); + int numDocs = 2 + random().nextInt(10); List docs = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { @@ -867,7 +871,7 @@ public void testReplicaHasDiffFilesThanPrimary() throws Exception { replicaShard.finalizeReplication(segmentInfos); ensureYellow(INDEX_NAME); - final int docCount = scaledRandomIntBetween(10, 200); + final int docCount = scaledRandomIntBetween(10, 20); for (int i = 0; i < docCount; i++) { client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); // Refresh, this should trigger round of segment replication @@ -887,7 +891,7 @@ public void testPressureServiceStats() throws Exception { final String replicaNode = 
internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - int initialDocCount = scaledRandomIntBetween(100, 200); + int initialDocCount = scaledRandomIntBetween(10, 20); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -993,8 +997,8 @@ public void testScrollCreatedOnReplica() throws Exception { final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - // index 100 docs - for (int i = 0; i < 100; i++) { + // index 10 docs + for (int i = 0; i < 10; i++) { client().prepareIndex(INDEX_NAME) .setId(String.valueOf(i)) .setSource(jsonBuilder().startObject().field("field", i).endObject()) @@ -1030,7 +1034,7 @@ public void testScrollCreatedOnReplica() throws Exception { // force call flush flush(INDEX_NAME); - for (int i = 3; i < 50; i++) { + for (int i = 3; i < 5; i++) { client().prepareDelete(INDEX_NAME, String.valueOf(i)).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); refresh(INDEX_NAME); if (randomBoolean()) { @@ -1072,7 +1076,7 @@ public void testScrollCreatedOnReplica() throws Exception { List.of(replicaShard.store().directory().listAll()).containsAll(snapshottedSegments) ) ); - assertEquals(100, scrollHits); + assertEquals(10, scrollHits); } @@ -1218,19 +1222,8 @@ public void testPitCreatedOnReplica() throws Exception { ensureYellowAndNoInitializingShards(INDEX_NAME); final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - client().prepareIndex(INDEX_NAME) - .setId("1") - .setSource("foo", randomInt()) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); - refresh(INDEX_NAME); - client().prepareIndex(INDEX_NAME) - .setId("2") - .setSource("foo", randomInt()) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); - for (int i = 3; i < 100; i++) { + for (int i = 0; i < 10; i++) { client().prepareIndex(INDEX_NAME) .setId(String.valueOf(i)) .setSource("foo", randomInt()) @@ -1280,7 +1273,7 @@ public void testPitCreatedOnReplica() throws Exception { } flush(INDEX_NAME); - for (int i = 101; i < 200; i++) { + for (int i = 11; i < 20; i++) { client().prepareIndex(INDEX_NAME) .setId(String.valueOf(i)) .setSource("foo", randomInt()) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java index 800704eae7fa7..8c045c1560dd3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java @@ -8,6 +8,7 @@ package org.opensearch.indices.replication; +import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; @@ -15,6 +16,7 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Before; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9499") @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, minNumDataNodes = 2) public class SegmentReplicationSuiteIT extends SegmentReplicationBaseIT { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java index 0967acb37d3e8..9c96d4861d426 100644 
--- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java @@ -169,7 +169,7 @@ private Settings.Builder settingsBuilder() { return Settings.builder().put(indexSettings()); } - public void testFieldDataStats() { + public void testFieldDataStats() throws InterruptedException { assertAcked( client().admin() .indices() @@ -182,6 +182,7 @@ public void testFieldDataStats() { client().prepareIndex("test").setId("1").setSource("field", "value1", "field2", "value1").execute().actionGet(); client().prepareIndex("test").setId("2").setSource("field", "value2", "field2", "value2").execute().actionGet(); client().admin().indices().prepareRefresh().execute().actionGet(); + indexRandomForConcurrentSearch("test"); NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); assertThat( @@ -305,6 +306,7 @@ public void testClearAllCaches() throws Exception { client().prepareIndex("test").setId("1").setSource("field", "value1").execute().actionGet(); client().prepareIndex("test").setId("2").setSource("field", "value2").execute().actionGet(); client().admin().indices().prepareRefresh().execute().actionGet(); + indexRandomForConcurrentSearch("test"); NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); assertThat( diff --git a/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlMultiNodeIT.java new file mode 100644 index 0000000000000..0af3d31f9e846 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlMultiNodeIT.java @@ -0,0 +1,292 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.UUIDs; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.node.resource.tracker.ResourceTrackerSettings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControllerStats; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Stream; + +import static org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE; +import static org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT; +import static org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2, numClientNodes = 1) +public class AdmissionControlMultiNodeIT extends OpenSearchIntegTestCase { + + public static final Settings settings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED) + .put(SEARCH_CPU_USAGE_LIMIT.getKey(), 0) + .put(INDEXING_CPU_USAGE_LIMIT.getKey(), 0) + .build(); + + private static final Logger LOGGER = LogManager.getLogger(AdmissionControlMultiNodeIT.class); + + public static final String INDEX_NAME = "test_index"; + + @Before + public void init() { + assertAcked( + prepareCreate( + INDEX_NAME, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); + ensureGreen(INDEX_NAME); + } + + @After + public 
void cleanup() { + client().admin().indices().prepareDelete(INDEX_NAME).get(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(settings).build(); + } + + public void testAdmissionControlRejectionOnEnforced() { + Tuple<String, String> primaryReplicaNodeNames = getPrimaryReplicaNodeNames(INDEX_NAME); + String primaryName = primaryReplicaNodeNames.v1(); + String replicaName = primaryReplicaNodeNames.v2(); + String coordinatingOnlyNode = getCoordinatingOnlyNode(); + AdmissionControlService admissionControlServicePrimary = internalCluster().getInstance(AdmissionControlService.class, primaryName); + AdmissionControlService admissionControlServiceReplica = internalCluster().getInstance(AdmissionControlService.class, replicaName); + final BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < 3; ++i) { + IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()) + .source(Collections.singletonMap("key", randomAlphaOfLength(50))); + bulkRequest.add(request); + } + BulkResponse res = client(coordinatingOnlyNode).bulk(bulkRequest).actionGet(); + assertEquals(429, res.getItems()[0].getFailure().getStatus().getStatus()); + AdmissionControllerStats admissionControlPrimaryStats = admissionControlServicePrimary.stats() + .getAdmissionControllerStatsList() + .get(0); + assertEquals(1, admissionControlPrimaryStats.rejectionCount.get(AdmissionControlActionType.INDEXING.getType()).longValue()); + Arrays.stream(res.getItems()).forEach(bulkItemResponse -> { + assertTrue(bulkItemResponse.getFailureMessage().contains("OpenSearchRejectedExecutionException")); + }); + SearchPhaseExecutionException searchException = assertThrows( + SearchPhaseExecutionException.class, + () -> client(coordinatingOnlyNode).prepareSearch(INDEX_NAME).get() + ); + assertTrue(searchException.getDetailedMessage().contains("OpenSearchRejectedExecutionException")); + AdmissionControllerStats primaryStats = admissionControlServicePrimary.stats().getAdmissionControllerStatsList().get(0); + assertEquals(1, primaryStats.rejectionCount.get(AdmissionControlActionType.SEARCH.getType()).longValue()); + } + + public void testAdmissionControlEnforcedOnNonACEnabledActions() throws ExecutionException, InterruptedException { + String coordinatingOnlyNode = getCoordinatingOnlyNode(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + + updateSettingsRequest.transientSettings( + Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(); + nodesStatsRequest.clear() + .indices(true) + .addMetrics( + NodesStatsRequest.Metric.JVM.metricName(), + NodesStatsRequest.Metric.OS.metricName(), + NodesStatsRequest.Metric.FS.metricName(), + NodesStatsRequest.Metric.PROCESS.metricName(), + NodesStatsRequest.Metric.ADMISSION_CONTROL.metricName() + ); + NodesStatsResponse nodesStatsResponse = client(coordinatingOnlyNode).admin().cluster().nodesStats(nodesStatsRequest).actionGet(); + ClusterHealthResponse clusterHealthResponse = client().admin().cluster().health(new ClusterHealthRequest()).actionGet(); + assertEquals(200, clusterHealthResponse.status().getStatus()); + assertFalse(nodesStatsResponse.hasFailures()); + } + + public void
testAdmissionControlRejectionOnMonitor() { + Tuple<String, String> primaryReplicaNodeNames = getPrimaryReplicaNodeNames(INDEX_NAME); + String primaryName = primaryReplicaNodeNames.v1(); + String replicaName = primaryReplicaNodeNames.v2(); + String coordinatingOnlyNode = getCoordinatingOnlyNode(); + + AdmissionControlService admissionControlServicePrimary = internalCluster().getInstance(AdmissionControlService.class, primaryName); + AdmissionControlService admissionControlServiceReplica = internalCluster().getInstance(AdmissionControlService.class, replicaName); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + + updateSettingsRequest.transientSettings( + Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.MONITOR.getMode() + ) + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + final BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < 3; ++i) { + IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()) + .source(Collections.singletonMap("key", randomAlphaOfLength(50))); + bulkRequest.add(request); + } + BulkResponse res = client(coordinatingOnlyNode).bulk(bulkRequest).actionGet(); + assertFalse(res.hasFailures()); + AdmissionControllerStats admissionControlPrimaryStats = admissionControlServicePrimary.stats() + .getAdmissionControllerStatsList() + .get(0); + AdmissionControllerStats admissionControlReplicaStats = admissionControlServiceReplica.stats() + .getAdmissionControllerStatsList() + .get(0); + long primaryRejectionCount = admissionControlPrimaryStats.rejectionCount.getOrDefault( + AdmissionControlActionType.INDEXING.getType(), + new AtomicLong(0).longValue() + ); + long replicaRejectionCount = admissionControlReplicaStats.rejectionCount.getOrDefault( + AdmissionControlActionType.INDEXING.getType(), + new AtomicLong(0).longValue() + ); + assertEquals(1, primaryRejectionCount); + assertEquals(0, replicaRejectionCount); + SearchResponse searchResponse = client(coordinatingOnlyNode).prepareSearch(INDEX_NAME).get(); + admissionControlPrimaryStats = admissionControlServicePrimary.stats().getAdmissionControllerStatsList().get(0); + admissionControlReplicaStats = admissionControlServiceReplica.stats().getAdmissionControllerStatsList().get(0); + primaryRejectionCount = admissionControlPrimaryStats.getRejectionCount() + .getOrDefault(AdmissionControlActionType.SEARCH.getType(), new AtomicLong(0).longValue()); + replicaRejectionCount = admissionControlReplicaStats.getRejectionCount() + .getOrDefault(AdmissionControlActionType.SEARCH.getType(), new AtomicLong(0).longValue()); + assertTrue(primaryRejectionCount == 1 || replicaRejectionCount == 1); + assertFalse(primaryRejectionCount == 1 && replicaRejectionCount == 1); + } + + public void testAdmissionControlRejectionOnDisabled() { + Tuple<String, String> primaryReplicaNodeNames = getPrimaryReplicaNodeNames(INDEX_NAME); + String primaryName = primaryReplicaNodeNames.v1(); + String replicaName = primaryReplicaNodeNames.v2(); + String coordinatingOnlyNode = getCoordinatingOnlyNode(); + + AdmissionControlService admissionControlServicePrimary = internalCluster().getInstance(AdmissionControlService.class, primaryName); + AdmissionControlService admissionControlServiceReplica = internalCluster().getInstance(AdmissionControlService.class, replicaName); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + + updateSettingsRequest.transientSettings( + Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.DISABLED.getMode() + ) + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + final BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < 3; ++i) { + IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()) + .source(Collections.singletonMap("key", randomAlphaOfLength(50))); + bulkRequest.add(request); + } + BulkResponse res = client(coordinatingOnlyNode).bulk(bulkRequest).actionGet(); + assertFalse(res.hasFailures()); + AdmissionControllerStats admissionControlPrimaryStats = admissionControlServicePrimary.stats() + .getAdmissionControllerStatsList() + .get(0); + AdmissionControllerStats admissionControlReplicaStats = admissionControlServiceReplica.stats() + .getAdmissionControllerStatsList() + .get(0); + long primaryRejectionCount = admissionControlPrimaryStats.rejectionCount.getOrDefault( + AdmissionControlActionType.INDEXING.getType(), + new AtomicLong(0).longValue() + ); + long replicaRejectionCount = admissionControlReplicaStats.rejectionCount.getOrDefault( + AdmissionControlActionType.INDEXING.getType(), + new AtomicLong(0).longValue() + ); + assertEquals(0, primaryRejectionCount); + assertEquals(0, replicaRejectionCount); + SearchResponse searchResponse = client(coordinatingOnlyNode).prepareSearch(INDEX_NAME).get(); + admissionControlPrimaryStats = admissionControlServicePrimary.stats().getAdmissionControllerStatsList().get(0); + admissionControlReplicaStats = admissionControlServiceReplica.stats().getAdmissionControllerStatsList().get(0); + primaryRejectionCount = admissionControlPrimaryStats.getRejectionCount() + .getOrDefault(AdmissionControlActionType.SEARCH.getType(), new AtomicLong(0).longValue()); + replicaRejectionCount = admissionControlReplicaStats.getRejectionCount() + .getOrDefault(AdmissionControlActionType.SEARCH.getType(), new AtomicLong(0).longValue()); + assertTrue(primaryRejectionCount == 0 && replicaRejectionCount == 0); + } + + private Tuple<String, String> getPrimaryReplicaNodeNames(String indexName) { + IndicesStatsResponse response = client().admin().indices().prepareStats(indexName).get(); + String primaryId = Stream.of(response.getShards()) + .map(ShardStats::getShardRouting) + .filter(ShardRouting::primary) + .findAny() + .get() + .currentNodeId(); + String replicaId = Stream.of(response.getShards()) + .map(ShardStats::getShardRouting) + .filter(sr -> sr.primary() == false) + .findAny() + .get() + .currentNodeId(); + DiscoveryNodes nodes = client().admin().cluster().prepareState().get().getState().nodes(); + String primaryName = nodes.get(primaryId).getName(); + String replicaName = nodes.get(replicaId).getName(); + return new Tuple<>(primaryName, replicaName); + } + + private String getCoordinatingOnlyNode() { + return client().admin() + .cluster() + .prepareState() + .get() + .getState() + .nodes() + .getCoordinatingOnlyNodes() + .values() + .iterator() + .next() + .getName(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java index 99c5d7fb2bae7..d29dacb001434 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java +++
b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java @@ -8,9 +8,7 @@ package org.opensearch.remotestore; -import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.index.IndexResponse; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.test.transport.MockTransportService; @@ -20,7 +18,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; public class BaseRemoteStoreRestoreIT extends RemoteStoreBaseIntegTestCase { @@ -49,18 +46,6 @@ protected void restore(String... indices) { restore(randomBoolean(), indices); } - protected void restore(boolean restoreAllShards, String... indices) { - if (restoreAllShards) { - assertAcked(client().admin().indices().prepareClose(indices)); - } - client().admin() - .cluster() - .restoreRemoteStore( - new RestoreRemoteStoreRequest().indices(indices).restoreAllShards(restoreAllShards), - PlainActionFuture.newFuture() - ); - } - protected void verifyRestoredData(Map indexStats, String indexName, boolean indexMoreData) throws Exception { ensureYellowAndNoInitializingShards(indexName); ensureGreen(indexName); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java index f19c9db7874db..6b6a96dc42a84 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java @@ -14,7 +14,6 @@ import org.opensearch.action.admin.indices.flush.FlushResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractAsyncTask; import org.opensearch.common.util.concurrent.UncategorizedExecutionException; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; @@ -22,6 +21,7 @@ import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; +import org.opensearch.index.IndexServiceTestUtils; import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; @@ -175,7 +175,7 @@ public void testAsyncTrimTaskSucceeds() { logger.info("Increasing the frequency of async trim task to ensure it runs in background while indexing"); IndexService indexService = internalCluster().getInstance(IndicesService.class, dataNodeName).iterator().next(); - ((AbstractAsyncTask) indexService.getTrimTranslogTask()).setInterval(TimeValue.timeValueMillis(100)); + IndexServiceTestUtils.setTrimTranslogTaskInterval(indexService, TimeValue.timeValueMillis(100)); logger.info("--> Indexing data"); indexData(randomIntBetween(2, 5), true); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 
8b4981a15433a..8c15ebd0505d9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -8,12 +8,16 @@ package org.opensearch.remotestore; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.bulk.BulkItemResponse; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkResponse; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.WriteRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; @@ -23,9 +27,13 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.index.Index; import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -43,6 +51,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; @@ -51,6 +60,7 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; @@ -380,4 +390,25 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { return filesExisting.get(); } + + protected IndexShard getIndexShard(String dataNode, String indexName) throws ExecutionException, InterruptedException { + String clusterManagerName = internalCluster().getClusterManagerName(); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); + GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get(); + String uuid = getIndexResponse.getSettings().get(indexName).get(IndexMetadata.SETTING_INDEX_UUID); + IndexService indexService = indicesService.indexService(new Index(indexName, uuid)); + return indexService.getShard(0); + } + + protected void restore(boolean restoreAllShards, String... 
indices) { + if (restoreAllShards) { + assertAcked(client().admin().indices().prepareClose(indices)); + } + client().admin() + .cluster() + .restoreRemoteStore( + new RestoreRemoteStoreRequest().indices(indices).restoreAllShards(restoreAllShards), + PlainActionFuture.newFuture() + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index b3b4f8e10fd31..e1997fea3433a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -8,44 +8,54 @@ package org.opensearch.remotestore; +import org.opensearch.OpenSearchException; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; -import org.opensearch.action.admin.indices.get.GetIndexRequest; -import org.opensearch.action.admin.indices.get.GetIndexResponse; +import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.BufferedAsyncIOProcessor; -import org.opensearch.core.index.Index; -import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.translog.Translog.Durability; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportService; import org.hamcrest.MatcherAssert; +import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static org.opensearch.index.shard.RemoteStoreRefreshListener.LAST_N_METADATA_FILES_TO_KEEP; import static 
org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -167,7 +177,7 @@ public void testRemoteTranslogCleanup() throws Exception { } public void testStaleCommitDeletionWithInvokeFlush() throws Exception { - internalCluster().startNode(); + String dataNode = internalCluster().startNode(); createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(5, 15); indexData(numberOfIterations, true, INDEX_NAME); @@ -177,17 +187,20 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception { .get() .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); + + IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); + int lastNMetadataFilesToKeep = indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles(); // Delete is async. assertBusy(() -> { int actualFileCount = getFileCount(indexPath); - if (numberOfIterations <= LAST_N_METADATA_FILES_TO_KEEP) { + if (numberOfIterations <= lastNMetadataFilesToKeep) { MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations - 1, numberOfIterations, numberOfIterations + 1))); } else { // As delete is async its possible that the file gets created before the deletion or after // deletion. MatcherAssert.assertThat( actualFileCount, - is(oneOf(LAST_N_METADATA_FILES_TO_KEEP - 1, LAST_N_METADATA_FILES_TO_KEEP, LAST_N_METADATA_FILES_TO_KEEP + 1)) + is(oneOf(lastNMetadataFilesToKeep - 1, lastNMetadataFilesToKeep, lastNMetadataFilesToKeep + 1)) ); } }, 30, TimeUnit.SECONDS); @@ -209,6 +222,44 @@ public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations - 1, numberOfIterations, numberOfIterations + 1))); } + public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { + Settings.Builder settings = Settings.builder() + .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "3"); + internalCluster().startNode(settings); + + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000L, -1)); + int numberOfIterations = randomIntBetween(5, 15); + indexData(numberOfIterations, true, INDEX_NAME); + String indexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); + int actualFileCount = getFileCount(indexPath); + // With the retention count set to 3, exactly 4 metadata files are expected: the 3 retained files plus one more, since index creation also triggers a refresh. + MatcherAssert.assertThat(actualFileCount, is(4)); + } + + public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Exception { + Settings.Builder settings = Settings.builder() + .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "-1"); + internalCluster().startNode(settings); + + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000L, -1)); + int numberOfIterations = randomIntBetween(12, 18); + indexData(numberOfIterations, true, INDEX_NAME); + String indexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); + int actualFileCount = getFileCount(indexPath); + // Expect exactly (numberOfIterations + 1) metadata files: retention is disabled, and index creation also triggers a refresh. + MatcherAssert.assertThat(actualFileCount, is(numberOfIterations + 1)); + } + /** * Tests that when the index setting is not passed during index creation, the buffer interval picked up is the cluster * default. @@ -222,7 +273,7 @@ public void testDefaultBufferInterval() throws ExecutionException, InterruptedEx ensureGreen(INDEX_NAME); assertClusterRemoteBufferInterval(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, dataNode); - IndexShard indexShard = getIndexShard(dataNode); + IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); assertTrue(indexShard.getTranslogSyncProcessor() instanceof BufferedAsyncIOProcessor); assertBufferInterval(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, indexShard); @@ -255,7 +306,7 @@ public void testOverriddenBufferInterval() throws ExecutionException, Interrupte ensureYellowAndNoInitializingShards(INDEX_NAME); ensureGreen(INDEX_NAME); - IndexShard indexShard = getIndexShard(dataNode); + IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); assertTrue(indexShard.getTranslogSyncProcessor() instanceof BufferedAsyncIOProcessor); assertBufferInterval(bufferInterval, indexShard); @@ -371,7 +422,7 @@ private void testRestrictSettingFalse(boolean setRestrictFalse, Durability durab .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability) .build(); createIndex(INDEX_NAME, indexSettings); - IndexShard indexShard = getIndexShard(dataNode); + IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); assertEquals(durability, indexShard.indexSettings().getTranslogDurability()); durability = randomFrom(Durability.values()); @@ -404,7 +455,7 @@ public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() throws E // Case 2 - Test update index fails createIndex(INDEX_NAME); - IndexShard indexShard = getIndexShard(dataNode); + IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); assertEquals(Durability.REQUEST, indexShard.indexSettings().getTranslogDurability()); exception = assertThrows( IllegalArgumentException.class, @@ -416,15 +467,6 @@ public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() throws E assertEquals(expectedExceptionMsg, exception.getMessage()); } - private IndexShard getIndexShard(String dataNode) throws ExecutionException, InterruptedException { - String clusterManagerName = internalCluster().getClusterManagerName(); - IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); - GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get(); - String uuid =
getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID); - IndexService indexService = indicesService.indexService(new Index(INDEX_NAME, uuid)); - return indexService.getShard(0); - } - private void assertClusterRemoteBufferInterval(TimeValue expectedBufferInterval, String dataNode) { IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); assertEquals(expectedBufferInterval, indicesService.getClusterRemoteTranslogBufferInterval()); @@ -516,7 +558,7 @@ public void testNoSearchIdleForAnyReplicaCount() throws ExecutionException, Inte createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); ensureGreen(INDEX_NAME); - IndexShard indexShard = getIndexShard(primaryShardNode); + IndexShard indexShard = getIndexShard(primaryShardNode, INDEX_NAME); assertFalse(indexShard.isSearchIdleSupported()); String replicaShardNode = internalCluster().startDataOnlyNodes(1).get(0); @@ -529,7 +571,235 @@ public void testNoSearchIdleForAnyReplicaCount() throws ExecutionException, Inte ensureGreen(INDEX_NAME); assertFalse(indexShard.isSearchIdleSupported()); - indexShard = getIndexShard(replicaShardNode); + indexShard = getIndexShard(replicaShardNode, INDEX_NAME); assertFalse(indexShard.isSearchIdleSupported()); } + + public void testFallbackToNodeToNodeSegmentCopy() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + List dataNodes = internalCluster().startDataOnlyNodes(2); + + // 1. Create index with 0 replica + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); + ensureGreen(INDEX_NAME); + + // 2. Index docs + indexBulk(INDEX_NAME, 50); + flushAndRefresh(INDEX_NAME); + + // 3. Delete data from remote segment store + String indexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + Path segmentDataPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/data"); + + try (Stream files = Files.list(segmentDataPath)) { + files.forEach(p -> { + try { + Files.delete(p); + } catch (IOException e) { + // Ignore + } + }); + } + + // 4. Start recovery by changing number of replicas to 1 + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + + // 5. Ensure green and verify number of docs + ensureGreen(INDEX_NAME); + assertBusy(() -> { + assertHitCount(client(dataNodes.get(0)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + assertHitCount(client(dataNodes.get(1)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + }); + } + + public void testNoMultipleWriterDuringPrimaryRelocation() throws ExecutionException, InterruptedException { + // In this test, we trigger a force flush on existing primary while the primary mode on new primary has been + // activated. There was a bug in primary relocation of remote store enabled indexes where the new primary + // starts uploading translog and segments even before the cluster manager has started this shard. With this test, + // we check that we do not overwrite any file on remote store. Here we will also increase the replica count to + // check that there are no duplicate metadata files for translog or upload. 
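+ // The timing below is driven by a MockTransportService send-behavior: flushLatch counts down when the HANDOFF_PRIMARY_CONTEXT transport action is observed, so the force flush on the old primary is timed to run while the new primary is taking over primary mode.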
+ + internalCluster().startClusterManagerOnlyNode(); + String oldPrimary = internalCluster().startDataOnlyNodes(1).get(0); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(INDEX_NAME); + indexBulk(INDEX_NAME, randomIntBetween(5, 10)); + String newPrimary = internalCluster().startDataOnlyNodes(1).get(0); + ensureStableCluster(3); + + IndexShard oldPrimaryIndexShard = getIndexShard(oldPrimary, INDEX_NAME); + CountDownLatch flushLatch = new CountDownLatch(1); + + MockTransportService mockTargetTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + oldPrimary + )); + mockTargetTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (PeerRecoveryTargetService.Actions.HANDOFF_PRIMARY_CONTEXT.equals(action)) { + flushLatch.countDown(); + } + connection.sendRequest(requestId, action, request, options); + }); + + logger.info("--> relocate the shard"); + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(INDEX_NAME, 0, oldPrimary, newPrimary)) + .execute() + .actionGet(); + + CountDownLatch flushDone = new CountDownLatch(1); + Thread flushThread = new Thread(() -> { + try { + flushLatch.await(2, TimeUnit.SECONDS); + oldPrimaryIndexShard.flush(new FlushRequest().waitIfOngoing(true).force(true)); + // newPrimaryTranslogRepo.setSleepSeconds(0); + } catch (IndexShardClosedException e) { + // this is fine + } catch (InterruptedException e) { + throw new AssertionError(e); + } finally { + flushDone.countDown(); + } + }); + flushThread.start(); + flushDone.await(5, TimeUnit.SECONDS); + flushThread.join(); + + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForStatus(ClusterHealthStatus.GREEN) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(TimeValue.timeValueSeconds(5)) + .execute() + .actionGet(); + assertFalse(clusterHealthResponse.isTimedOut()); + + client().admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ) + .get(); + + clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForStatus(ClusterHealthStatus.GREEN) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(TimeValue.timeValueSeconds(5)) + .execute() + .actionGet(); + assertFalse(clusterHealthResponse.isTimedOut()); + } + + public void testResumeUploadAfterFailedPrimaryRelocation() throws ExecutionException, InterruptedException, IOException { + // In this test, we fail the hand off during the primary relocation. This will undo the drainRefreshes and + // drainSync performed as part of relocation handoff (before performing the handoff transport action). + // We validate the same here by failing the peer recovery and ensuring we can index afterward as well. 
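+ // Concretely, the send-behavior installed below throws an OpenSearchException from the HANDOFF_PRIMARY_CONTEXT action to simulate the failed hand-off; the old primary is then expected to remain a started primary and to keep serving indexing while uploads resume.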
+ + internalCluster().startClusterManagerOnlyNode(); + String oldPrimary = internalCluster().startDataOnlyNodes(1).get(0); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(INDEX_NAME); + int docs = randomIntBetween(5, 10); + indexBulk(INDEX_NAME, docs); + flushAndRefresh(INDEX_NAME); + assertHitCount(client(oldPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), docs); + String newPrimary = internalCluster().startDataOnlyNodes(1).get(0); + ensureStableCluster(3); + + IndexShard oldPrimaryIndexShard = getIndexShard(oldPrimary, INDEX_NAME); + CountDownLatch handOffLatch = new CountDownLatch(1); + + MockTransportService mockTargetTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + oldPrimary + )); + mockTargetTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (PeerRecoveryTargetService.Actions.HANDOFF_PRIMARY_CONTEXT.equals(action)) { + handOffLatch.countDown(); + throw new OpenSearchException("failing recovery for test purposes"); + } + connection.sendRequest(requestId, action, request, options); + }); + + logger.info("--> relocate the shard"); + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(INDEX_NAME, 0, oldPrimary, newPrimary)) + .execute() + .actionGet(); + + handOffLatch.await(30, TimeUnit.SECONDS); + + assertTrue(oldPrimaryIndexShard.isStartedPrimary()); + assertEquals(oldPrimary, primaryNodeName(INDEX_NAME)); + assertHitCount(client(oldPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), docs); + + SearchPhaseExecutionException ex = assertThrows( + SearchPhaseExecutionException.class, + () -> client(newPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get() + ); + assertEquals("all shards failed", ex.getMessage()); + + int moreDocs = randomIntBetween(5, 10); + indexBulk(INDEX_NAME, moreDocs); + flushAndRefresh(INDEX_NAME); + int uncommittedOps = randomIntBetween(5, 10); + indexBulk(INDEX_NAME, uncommittedOps); + assertHitCount(client(oldPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), docs + moreDocs); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); + + restore(true, INDEX_NAME); + ensureGreen(INDEX_NAME); + assertHitCount( + client(newPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), + docs + moreDocs + uncommittedOps + ); + + String newNode = internalCluster().startDataOnlyNodes(1).get(0); + ensureStableCluster(3); + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(INDEX_NAME, 0, newPrimary, newNode)) + .execute() + .actionGet(); + + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForStatus(ClusterHealthStatus.GREEN) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(TimeValue.timeValueSeconds(10)) + .execute() + .actionGet(); + assertFalse(clusterHealthResponse.isTimedOut()); + + ex = assertThrows( + SearchPhaseExecutionException.class, + () -> client(newPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get() + ); + assertEquals("all shards failed", ex.getMessage()); + assertHitCount( + client(newNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), + docs + moreDocs + uncommittedOps + ); + } } diff --git 
a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java index b7b3f1d14f422..d5cdc22a15478 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java @@ -8,10 +8,16 @@ package org.opensearch.remotestore; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.index.Index; import org.opensearch.index.IndexService; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.SegmentReplicationState; @@ -20,10 +26,12 @@ import org.opensearch.indices.replication.common.ReplicationCollection; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.disruption.SlowClusterStateProcessing; import java.nio.file.Path; import java.util.Optional; import java.util.Set; +import java.util.concurrent.TimeUnit; /** * This class runs tests with remote store + segRep while blocking file downloads @@ -111,6 +119,75 @@ public void testCancelReplicationWhileFetchingMetadata() throws Exception { cleanupRepo(); } + public void testUpdateVisibleCheckpointWithLaggingClusterStateUpdates_primaryRelocation() throws Exception { + Path location = randomRepoPath().toAbsolutePath(); + Settings nodeSettings = Settings.builder().put(buildRemoteStoreNodeAttributes(location, 0d, "metadata", Long.MAX_VALUE)).build(); + internalCluster().startClusterManagerOnlyNode(nodeSettings); + internalCluster().startDataOnlyNodes(2, nodeSettings); + final Settings indexSettings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build(); + createIndex(INDEX_NAME, indexSettings); + ensureGreen(INDEX_NAME); + final Set dataNodeNames = internalCluster().getDataNodeNames(); + final String replicaNode = getNode(dataNodeNames, false); + final String oldPrimary = getNode(dataNodeNames, true); + + // index a doc. 
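+ // A single indexed document is enough here: the assertions below compare replication checkpoint versions and replication-lag stats rather than document counts.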
+ client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", randomInt()).get(); + refresh(INDEX_NAME); + + logger.info("--> start another node"); + final String newPrimary = internalCluster().startDataOnlyNode(nodeSettings); + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes("4") + .get(); + assertFalse(clusterHealthResponse.isTimedOut()); + + SlowClusterStateProcessing disruption = new SlowClusterStateProcessing(replicaNode, random(), 0, 0, 1000, 2000); + internalCluster().setDisruptionScheme(disruption); + disruption.startDisrupting(); + + // relocate the primary + logger.info("--> relocate the shard"); + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(INDEX_NAME, 0, oldPrimary, newPrimary)) + .execute() + .actionGet(); + clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(new TimeValue(5, TimeUnit.MINUTES)) + .execute() + .actionGet(); + assertFalse(clusterHealthResponse.isTimedOut()); + + IndexShard newPrimaryShard = getIndexShard(newPrimary, INDEX_NAME); + IndexShard replica = getIndexShard(replicaNode, INDEX_NAME); + assertBusy(() -> { + assertEquals( + newPrimaryShard.getLatestReplicationCheckpoint().getSegmentInfosVersion(), + replica.getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + }); + + assertBusy(() -> { + ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get(); + ReplicationStats replicationStats = clusterStatsResponse.getIndicesStats().getSegments().getReplicationStats(); + assertEquals(0L, replicationStats.maxBytesBehind); + assertEquals(0L, replicationStats.maxReplicationLag); + assertEquals(0L, replicationStats.totalBytesBehind); + }); + disruption.stopDisrupting(); + disableRepoConsistencyCheck("Remote Store Creates System Repository"); + cleanupRepo(); + } + private String getNode(Set<String> dataNodeNames, boolean primary) { assertEquals(2, dataNodeNames.size()); for (String name : dataNodeNames) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java index 94816346e6c9e..52cc797ddd8da 100644
Script(ScriptType.INLINE, "mockscript", SCRIPT_NAME, Collections.emptyMap()))) @@ -122,7 +122,7 @@ public void testSimpleDoesNotTimeout() throws Exception { public void testPartialResultsIntolerantTimeout() throws Exception { client().prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - + indexRandomForConcurrentSearch("test"); OpenSearchException ex = expectThrows( OpenSearchException.class, () -> client().prepareSearch("test") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java index 1ef2c0e8db8c7..6289cd5e36151 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java @@ -172,6 +172,7 @@ public void setupSuiteScopeCluster() throws Exception { client().prepareIndex("idx").setSource(source.endObject()).get(); } refresh(); + indexRandomForMultipleSlices("idx"); ensureSearchable(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java index b3009ffcf4f45..7af2ac218800d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java @@ -224,6 +224,7 @@ public void setupSuiteScopeCluster() throws Exception { ) ); indexRandom(true, builders); + indexRandomForMultipleSlices("idx"); ensureSearchable(); } @@ -354,6 +355,7 @@ public void testNestedAsSubAggregation() throws Exception { } public void testNestNestedAggs() throws Exception { + indexRandomForConcurrentSearch("idx_nested_nested_aggs"); SearchResponse response = client().prepareSearch("idx_nested_nested_aggs") .addAggregation( nested("level1", "nested1").subAggregation( @@ -607,6 +609,7 @@ public void testNestedSameDocIdProcessedMultipleTime() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("idx4"); SearchResponse response = client().prepareSearch("idx4") .addAggregation( @@ -782,6 +785,7 @@ public void testFilterAggInsideNestedAgg() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("classes"); SearchResponse response = client().prepareSearch("classes") .addAggregation( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java index 64ab6f1382ac3..5812b7796c33e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java @@ -184,6 +184,7 @@ public void setupSuiteScopeCluster() throws Exception { builders.add(client().prepareIndex("new_index").setSource(Collections.emptyMap())); indexRandom(true, builders); + indexRandomForMultipleSlices("idx", "old_index", "new_index"); ensureSearchable(); } @@ -917,6 +918,7 @@ public void testOverlappingRanges() throws Exception { } public void testEmptyAggregation() throws Exception { + indexRandomForConcurrentSearch("empty_bucket_idx"); SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) .addAggregation( 
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java
index 3fcf4b5d533d4..b355ce6d7a8dd 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java
@@ -225,6 +225,7 @@ public void setupSuiteScopeCluster() throws Exception {
         }
         indexRandom(true, builders);
+        indexRandomForMultipleSlices("idx");
         ensureSearchable();
     }
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java
index 2bf5230c67b43..6cbf278317b1b 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java
@@ -76,6 +76,7 @@ protected Settings featureFlagSettings() {
     /**
      * Test that searches using cardinality aggregations return all request breaker memory.
      */
+    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10154")
     public void testRequestBreaker() throws Exception {
         final String requestBreaker = randomIntBetween(1, 10000) + "kb";
         logger.info("--> Using request breaker setting: {}", requestBreaker);
@@ -99,6 +100,7 @@ public void testRequestBreaker() throws Exception {
             )
             .get();
 
+        indexRandomForConcurrentSearch("test");
         try {
             client().prepareSearch("test")
                 .addAggregation(
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java
index 77ddc5bf636f0..87f2153eb800f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java
@@ -100,10 +100,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
     @SuppressWarnings("unchecked")
     public void testPlugin() throws Exception {
-        assumeFalse(
-            "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11112",
-            internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
-        );
         client().admin()
             .indices()
             .prepareCreate("test")
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java
index 9b3e1337418cc..1a730c01e4890 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java
@@ -897,6 +897,7 @@ public void testNestedSource() throws Exception {
             )
             .get();
         refresh();
+        indexRandomForConcurrentSearch("index1");
 
         // the field name (comments.message) used for source filtering should be the same as when using that field for
         // other features (like in the query dsl or aggs) for consistency:
@@ -973,6 +974,7 @@ public void testInnerHitsWithIgnoreUnmapped() throws Exception {
         client().prepareIndex("index1").setId("1").setSource("nested_type",
Collections.singletonMap("key", "value")).get(); client().prepareIndex("index2").setId("3").setSource("key", "value").get(); refresh(); + indexRandomForConcurrentSearch("index1", "index2"); SearchResponse response = client().prepareSearch("index1", "index2") .setQuery( @@ -1002,6 +1004,7 @@ public void testUseMaxDocInsteadOfSize() throws Exception { .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("index2"); QueryBuilder query = nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg).innerHit( new InnerHitBuilder().setSize(ArrayUtil.MAX_ARRAY_LENGTH - 1) ); @@ -1019,6 +1022,7 @@ public void testTooHighResultWindow() throws Exception { ) .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("index2"); SearchResponse response = client().prepareSearch("index2") .setQuery( nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg).innerHit( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java index 23b5d0cab0697..83cedb8c20e1d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java @@ -91,6 +91,7 @@ public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { client().prepareIndex("test").setId("2").setSource("name", "test2", "number", 2).get(); client().prepareIndex("test").setId("3").setSource("name", "test3", "number", 3).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery( @@ -141,6 +142,7 @@ public void testSimpleMatchedQueryFromTopLevelFilter() throws Exception { client().prepareIndex("test").setId("2").setSource("name", "test").get(); client().prepareIndex("test").setId("3").setSource("name", "test").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -192,6 +194,7 @@ public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Ex client().prepareIndex("test").setId("2").setSource("name", "test", "title", "title2").get(); client().prepareIndex("test").setId("3").setSource("name", "test", "title", "title3").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(boolQuery().must(matchAllQuery()).filter(termsQuery("title", "title1", "title2", "title3").queryName("title"))) @@ -224,12 +227,13 @@ public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Ex } } - public void testRegExpQuerySupportsName() { + public void testRegExpQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.regexpQuery("title", "title1").queryName("regex")) @@ -246,12 +250,13 @@ public void testRegExpQuerySupportsName() { } } - public void testPrefixQuerySupportsName() { + public void testPrefixQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); 
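        // indexRandomForConcurrentSearch, sprinkled after refresh() throughout this change, pads the
        // shard with additional segments so that, when concurrent segment search is enabled, the query
        // actually fans out over multiple slices; it declares InterruptedException, which is why these
        // test signatures gain a `throws InterruptedException` clause. A hypothetical sketch of the
        // kind of hit-count-neutral padding involved (not the actual helper implementation):
        //
        //     for (int i = 0; i < 3; i++) {
        //         client().prepareIndex("test1").setId("dummy" + i).setSource("{}", MediaTypeRegistry.JSON).get();
        //         refresh("test1"); // each refresh cuts a separate segment
        //         client().prepareDelete("test1", "dummy" + i).get(); // delete so hit-count assertions still hold
        //     }
        //     refresh("test1");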
SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.prefixQuery("title", "title").queryName("prefix")) @@ -268,12 +273,13 @@ public void testPrefixQuerySupportsName() { } } - public void testFuzzyQuerySupportsName() { + public void testFuzzyQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.fuzzyQuery("title", "titel1").queryName("fuzzy")) @@ -290,12 +296,13 @@ public void testFuzzyQuerySupportsName() { } } - public void testWildcardQuerySupportsName() { + public void testWildcardQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.wildcardQuery("title", "titl*").queryName("wildcard")) @@ -312,12 +319,13 @@ public void testWildcardQuerySupportsName() { } } - public void testSpanFirstQuerySupportsName() { + public void testSpanFirstQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1 title2").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.spanFirstQuery(QueryBuilders.spanTermQuery("title", "title1"), 10).queryName("span")) @@ -344,6 +352,7 @@ public void testMatchedWithShould() throws Exception { client().prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get(); client().prepareIndex("test").setId("2").setSource("content", "consectetur adipisicing elit").get(); refresh(); + indexRandomForConcurrentSearch("test"); // Execute search at least two times to load it in cache int iter = scaledRandomIntBetween(2, 10); @@ -378,6 +387,7 @@ public void testMatchedWithWrapperQuery() throws Exception { client().prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get(); refresh(); + indexRandomForConcurrentSearch("test"); MatchQueryBuilder matchQueryBuilder = matchQuery("content", "amet").queryName("abc"); BytesReference matchBytes = XContentHelper.toXContent(matchQueryBuilder, MediaTypeRegistry.JSON, false); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java index 799bbf91a567d..ed8fe74504f92 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java @@ -251,6 +251,7 @@ public void testStoredFields() throws Exception { .get(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addStoredField("field1").get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); @@ -358,6 +359,7 @@ public void testScriptDocAndFields() throws Exception { ) .get(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); logger.info("running doc['num1'].value"); SearchResponse response = 
client().prepareSearch() @@ -458,6 +460,7 @@ public void testScriptWithUnsignedLong() throws Exception { ) .get(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) @@ -547,6 +550,7 @@ public void testScriptFieldWithNanos() throws Exception { .setSource(jsonBuilder().startObject().field("date", "1970-01-01T00:00:00.000Z").endObject()), client().prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("date", date).endObject()) ); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) @@ -632,6 +636,7 @@ public void testScriptFieldUsingSource() throws Exception { ) .get(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) @@ -674,6 +679,7 @@ public void testScriptFieldUsingSource() throws Exception { public void testScriptFieldsForNullReturn() throws Exception { client().prepareIndex("test").setId("1").setSource("foo", "bar").setRefreshPolicy("true").get(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) .addScriptField("test_script_1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return null", Collections.emptyMap())) @@ -795,6 +801,7 @@ public void testStoredFieldsWithoutSource() throws Exception { .get(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -852,6 +859,7 @@ public void testSearchFieldsMetadata() throws Exception { .setSource(jsonBuilder().startObject().field("field1", "value").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("my-index"); SearchResponse searchResponse = client().prepareSearch("my-index").addStoredField("field1").addStoredField("_routing").get(); @@ -866,6 +874,7 @@ public void testSearchFieldsNonLeafField() throws Exception { .setSource(jsonBuilder().startObject().startObject("field1").field("field2", "value1").endObject().endObject()) .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("my-index"); assertFailures( client().prepareSearch("my-index").addStoredField("field1"), @@ -932,6 +941,7 @@ public void testGetFieldsComplexField() throws Exception { ); client().prepareIndex("my-index").setId("1").setRefreshPolicy(IMMEDIATE).setSource(source, MediaTypeRegistry.JSON).get(); + indexRandomForConcurrentSearch("my-index"); String field = "field1.field2.field3.field4"; @@ -1039,6 +1049,7 @@ public void testDocValueFields() throws Exception { .get(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchRequestBuilder builder = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1271,6 +1282,7 @@ public void testScriptFields() throws Exception { ); } indexRandom(true, reqs); + indexRandomForConcurrentSearch("index"); ensureSearchable(); SearchRequestBuilder req = client().prepareSearch("index"); for (String field : Arrays.asList("s", "ms", "l", "ml", "d", "md")) { @@ -1326,6 +1338,7 @@ public void testDocValueFieldsWithFieldAlias() throws Exception { index("test", MapperService.SINGLE_MAPPING_NAME, "1", "text_field", "foo", "date_field", formatter.print(date)); refresh("test"); + 
indexRandomForConcurrentSearch("test"); SearchRequestBuilder builder = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1387,6 +1400,7 @@ public void testWildcardDocValueFieldsWithFieldAlias() throws Exception { index("test", MapperService.SINGLE_MAPPING_NAME, "1", "text_field", "foo", "date_field", formatter.print(date)); refresh("test"); + indexRandomForConcurrentSearch("test"); SearchRequestBuilder builder = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1440,6 +1454,7 @@ public void testStoredFieldsWithFieldAlias() throws Exception { index("test", MapperService.SINGLE_MAPPING_NAME, "1", "field1", "value1", "field2", "value2"); refresh("test"); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1482,6 +1497,7 @@ public void testWildcardStoredFieldsWithFieldAlias() throws Exception { index("test", MapperService.SINGLE_MAPPING_NAME, "1", "field1", "value1", "field2", "value2"); refresh("test"); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addStoredField("field*").get(); assertHitCount(searchResponse, 1L); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java index 6eb528e0bb7d3..3a6624c2ad2e6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java @@ -398,6 +398,7 @@ public void testBoostModeSettingWorks() throws Exception { ) ); indexRandom(true, false, indexBuilders); // force no dummy docs + indexRandomForConcurrentSearch("test"); // Test Gauss List lonlat = new ArrayList<>(); @@ -482,6 +483,7 @@ public void testParseGeoPoint() throws Exception { constantScoreQuery(termQuery("test", "value")), ScoreFunctionBuilders.weightFactorFunction(randomIntBetween(1, 10)) ); + indexRandomForConcurrentSearch("test"); GeoPoint point = new GeoPoint(20, 11); ActionFuture response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) @@ -535,6 +537,7 @@ public void testCombineModes() throws Exception { .setRefreshPolicy(IMMEDIATE) .setSource(jsonBuilder().startObject().field("test", "value value").field("num", 1.0).endObject()) .get(); + indexRandomForConcurrentSearch("test"); FunctionScoreQueryBuilder baseQuery = functionScoreQuery( constantScoreQuery(termQuery("test", "value")), ScoreFunctionBuilders.weightFactorFunction(2) @@ -654,6 +657,7 @@ public void testCombineModesExplain() throws Exception { constantScoreQuery(termQuery("test", "value")).queryName("query1"), ScoreFunctionBuilders.weightFactorFunction(2, "weight1") ); + indexRandomForConcurrentSearch("test"); // decay score should return 0.5 for this function and baseQuery should return 2.0f as it's score ActionFuture response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) @@ -762,6 +766,7 @@ public void testParseDateMath() throws Exception { ).actionGet(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse sr = client().search( searchRequest().source( searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now", "2d"))) @@ -817,6 +822,7 @@ public void testValueMissingLin() throws Exception { ).actionGet(); refresh(); + 
indexRandomForConcurrentSearch("test"); ActionFuture response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) @@ -893,6 +899,7 @@ public void testDateWithoutOrigin() throws Exception { ).actionGet(); refresh(); + indexRandomForConcurrentSearch("test"); ActionFuture response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) @@ -974,6 +981,7 @@ public void testManyDocsLin() throws Exception { List lonlat = new ArrayList<>(); lonlat.add(100f); lonlat.add(110f); + indexRandomForConcurrentSearch("test"); ActionFuture response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) .source( @@ -1107,6 +1115,7 @@ public void testNoQueryGiven() throws Exception { client().index(indexRequest("test").source(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject())) .actionGet(); refresh(); + indexRandomForConcurrentSearch("test"); // so, we indexed a string field, but now we try to score a num field ActionFuture response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) @@ -1171,6 +1180,7 @@ public void testMultiFieldOptions() throws Exception { ); indexRandom(true, doc1, doc2); + indexRandomForConcurrentSearch("test"); ActionFuture response = client().search(searchRequest().source(searchSource().query(baseQuery))); SearchResponse sr = response.actionGet(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java index b09914c4aa764..d53f55b98bd23 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java @@ -80,7 +80,7 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); } - public void testFieldValueFactor() throws IOException { + public void testFieldValueFactor() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -99,8 +99,8 @@ public void testFieldValueFactor() throws IOException { client().prepareIndex("test").setId("1").setSource("test", 5, "body", "foo").get(); client().prepareIndex("test").setId("2").setSource("test", 17, "body", "foo").get(); client().prepareIndex("test").setId("3").setSource("body", "bar").get(); - refresh(); + indexRandomForConcurrentSearch("test"); // document 2 scores higher because 17 > 5 SearchResponse response = client().prepareSearch("test") @@ -189,7 +189,7 @@ public void testFieldValueFactor() throws IOException { } } - public void testFieldValueFactorExplain() throws IOException { + public void testFieldValueFactorExplain() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -210,6 +210,7 @@ public void testFieldValueFactorExplain() throws IOException { client().prepareIndex("test").setId("3").setSource("body", "bar").get(); refresh(); + indexRandomForConcurrentSearch("test"); // document 2 scores higher because 17 > 5 final String functionName = "func1"; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java index 
88395f25700d2..3b80d437e95c0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java @@ -126,10 +126,11 @@ protected Map, Object>> pluginScripts() { } } - public void testScriptScoresNested() throws IOException { + public void testScriptScoresNested() throws IOException, InterruptedException { createIndex(INDEX); index(INDEX, TYPE, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); + indexRandomForConcurrentSearch(INDEX); Script scriptOne = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "1", Collections.emptyMap()); Script scriptTwo = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); @@ -148,10 +149,11 @@ public void testScriptScoresNested() throws IOException { assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)); } - public void testScriptScoresWithAgg() throws IOException { + public void testScriptScoresWithAgg() throws IOException, InterruptedException { createIndex(INDEX); index(INDEX, TYPE, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); + indexRandomForConcurrentSearch(INDEX); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); @@ -166,10 +168,11 @@ public void testScriptScoresWithAgg() throws IOException { assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getDocCount(), is(1L)); } - public void testScriptScoresWithAggWithExplain() throws IOException { + public void testScriptScoresWithAggWithExplain() throws IOException, InterruptedException { createIndex(INDEX); index(INDEX, TYPE, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); + indexRandomForConcurrentSearch(INDEX); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); @@ -195,7 +198,7 @@ public void testScriptScoresWithAggWithExplain() throws IOException { assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getDocCount(), is(1L)); } - public void testMinScoreFunctionScoreBasic() throws IOException { + public void testMinScoreFunctionScoreBasic() throws IOException, InterruptedException { float score = randomValueOtherThanMany((f) -> Float.compare(f, 0) < 0, OpenSearchTestCase::randomFloat); float minScore = randomValueOtherThanMany((f) -> Float.compare(f, 0) < 0, OpenSearchTestCase::randomFloat); index( @@ -207,6 +210,7 @@ public void testMinScoreFunctionScoreBasic() throws IOException { .endObject() ); refresh(); + indexRandomForConcurrentSearch(INDEX); ensureYellow(); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['random_score']", Collections.emptyMap()); @@ -291,6 +295,7 @@ public void testWithEmptyFunctions() throws IOException, ExecutionException, Int assertAcked(prepareCreate("test")); index("test", "testtype", "1", jsonBuilder().startObject().field("text", "test text").endObject()); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse termQuery = client().search(searchRequest().source(searchSource().explain(true).query(termQuery("text", "text")))) .get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java 
b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java index 1df4acac0dcf0..a91f53dae04d2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java @@ -122,6 +122,7 @@ public void testPlugin() throws Exception { ).actionGet(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); DecayFunctionBuilder gfb = new CustomDistanceScoreBuilder("num1", "2013-05-28", "+1d"); ActionFuture response = client().search( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java index de4c85301547c..bda6284d9535a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java @@ -110,7 +110,7 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); } - public void testEnforceWindowSize() { + public void testEnforceWindowSize() throws InterruptedException { createIndex("test"); // this int iters = scaledRandomIntBetween(10, 20); @@ -118,6 +118,7 @@ public void testEnforceWindowSize() { client().prepareIndex("test").setId(Integer.toString(i)).setSource("f", Integer.toString(i)).get(); } refresh(); + indexRandomForConcurrentSearch("test"); int numShards = getNumShards("test").numPrimaries; for (int j = 0; j < iters; j++) { @@ -169,6 +170,7 @@ public void testRescorePhrase() throws Exception { .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree") .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) .setRescorer( @@ -474,6 +476,7 @@ private static void assertEquivalent(String query, SearchResponse plain, SearchR public void testEquivalence() throws Exception { // no dummy docs since merges can change scores while we run queries. 
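        // (The `false` passed to indexRandomNumbers below is what turns those dummy docs off:
        // this test compares absolute scores between rescored and equivalent plain queries, and
        // padding docs can shift term statistics once segments merge. The concurrent-search
        // padding added right after it is presumably score-neutral by design, or this
        // equivalence check would become flaky.)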
int numDocs = indexRandomNumbers("whitespace", -1, false); + indexRandomForConcurrentSearch("test"); final int iters = scaledRandomIntBetween(50, 100); for (int i = 0; i < iters; i++) { @@ -545,6 +548,7 @@ public void testExplain() throws Exception { .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree") .get(); refresh(); + indexRandomForConcurrentSearch("test"); { SearchResponse searchResponse = client().prepareSearch() @@ -816,6 +820,7 @@ public void testFromSize() throws Exception { client().prepareIndex("test").setId("" + i).setSource("text", "hello world").get(); } refresh(); + indexRandomForConcurrentSearch("test"); SearchRequestBuilder request = client().prepareSearch(); request.setQuery(QueryBuilders.termQuery("text", "hello")); @@ -832,6 +837,7 @@ public void testRescorePhaseWithInvalidSort() throws Exception { client().prepareIndex("test").setId("" + i).setSource("number", 0).get(); } refresh(); + indexRandomForConcurrentSearch("test"); Exception exc = expectThrows( Exception.class, diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java index 54cfcb7e6a683..69e30fc879dd8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java @@ -135,6 +135,7 @@ public void testConsistentHitsWithSameSeed() throws Exception { } flush(); refresh(); + indexRandomForConcurrentSearch("test"); int outerIters = scaledRandomIntBetween(10, 20); for (int o = 0; o < outerIters; o++) { final int seed = randomInt(); @@ -299,6 +300,7 @@ public void testSeedReportedInExplain() throws Exception { index("test", "type", "1", jsonBuilder().startObject().endObject()); flush(); refresh(); + indexRandomForConcurrentSearch("test"); int seed = 12345678; @@ -318,6 +320,7 @@ public void testSeedAndNameReportedInExplain() throws Exception { index("test", "type", "1", jsonBuilder().startObject().endObject()); flush(); refresh(); + indexRandomForConcurrentSearch("test"); int seed = 12345678; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java index bc1d2833ecbbf..b35208941d2a2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java @@ -71,12 +71,13 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); } - public void testSimpleMultiSearch() { + public void testSimpleMultiSearch() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "xxx").get(); client().prepareIndex("test").setId("2").setSource("field", "yyy").get(); refresh(); + indexRandomForConcurrentSearch("test"); MultiSearchResponse response = client().prepareMultiSearch() .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx"))) .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy"))) @@ -94,13 +95,14 @@ public void testSimpleMultiSearch() { assertFirstHit(response.getResponses()[1].getResponse(), 
hasId("2")); } - public void testSimpleMultiSearchMoreRequests() { + public void testSimpleMultiSearchMoreRequests() throws InterruptedException { createIndex("test"); int numDocs = randomIntBetween(0, 16); for (int i = 0; i < numDocs; i++) { client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", MediaTypeRegistry.JSON).get(); } refresh(); + indexRandomForConcurrentSearch("test"); int numSearchRequests = randomIntBetween(1, 64); MultiSearchRequest request = new MultiSearchRequest(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java index 656e7b2e366ed..6d0b074c3a660 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java @@ -126,6 +126,7 @@ public void testSimpleNested() throws Exception { .get(); waitForRelocation(ClusterHealthStatus.GREEN); + indexRandomForConcurrentSearch("test"); GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getSourceAsBytes(), notNullValue()); @@ -569,6 +570,7 @@ public void testSimpleNestedSorting() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.matchAllQuery()) @@ -677,6 +679,7 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchRequestBuilder searchRequestBuilder = client().prepareSearch("test") .setQuery(QueryBuilders.matchAllQuery()) @@ -865,6 +868,7 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); // access id = 1, read, max value, asc, should use grault and quxx SearchResponse searchResponse = client().prepareSearch() @@ -970,10 +974,6 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { // https://github.com/elastic/elasticsearch/issues/31554 public void testLeakingSortValues() throws Exception { - assumeFalse( - "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11065", - internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); assertAcked( prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 1)) .setMapping( @@ -1222,6 +1222,7 @@ public void testSortNestedWithNestedFilter() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); // Without nested filter SearchResponse searchResponse = client().prepareSearch() @@ -1602,6 +1603,7 @@ public void testNestedSortingWithNestedFilterAsFilter() throws Exception { .get(); assertTrue(indexResponse2.getShardInfo().getSuccessful() > 0); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .addSort(SortBuilders.fieldSort("users.first").setNestedPath("users").order(SortOrder.ASC)) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java index 97fe05f5b9747..6e40c08ed08a1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java 
+++ b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java @@ -160,12 +160,13 @@ public void testNoPreferenceRandom() throws Exception { assertThat(firstNodeId, not(equalTo(secondNodeId))); } - public void testSimplePreference() { + public void testSimplePreference() throws InterruptedException { client().admin().indices().prepareCreate("test").setSettings("{\"number_of_replicas\": 1}", MediaTypeRegistry.JSON).get(); ensureGreen(); client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); @@ -264,7 +265,7 @@ private void assertSearchOnRandomNodes(SearchRequestBuilder request) { assertThat(hitNodes.size(), greaterThan(1)); } - public void testCustomPreferenceUnaffectedByOtherShardMovements() { + public void testCustomPreferenceUnaffectedByOtherShardMovements() throws InterruptedException { /* * Custom preferences can be used to encourage searches to go to a consistent set of shard copies, meaning that other copies' data @@ -283,6 +284,7 @@ public void testCustomPreferenceUnaffectedByOtherShardMovements() { ensureGreen(); client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); final String customPreference = randomAlphaOfLength(10); @@ -302,6 +304,7 @@ public void testCustomPreferenceUnaffectedByOtherShardMovements() { prepareCreate("test2").setSettings(Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, replicasInNewIndex)) ); ensureGreen(); + indexRandomForConcurrentSearch("test2"); assertSearchesSpecificNode("test", customPreference, nodeId); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index 53bded1fc493c..03312c6e1e2f7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -196,7 +196,7 @@ public void testEmptyQueryString() throws ExecutionException, InterruptedExcepti } // see https://github.com/elastic/elasticsearch/issues/3177 - public void testIssue3177() { + public void testIssue3177() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); client().prepareIndex("test").setId("2").setSource("field1", "value2").get(); @@ -205,6 +205,7 @@ public void testIssue3177() { waitForRelocation(); forceMerge(); refresh(); + indexRandomForConcurrentSearch("test"); assertHitCount( client().prepareSearch() .setQuery(matchAllQuery()) @@ -394,7 +395,19 @@ public void testCommonTermsQuery() throws Exception { assertSecondHit(searchResponse, hasId("2")); assertThirdHit(searchResponse, hasId("3")); - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the huge fox").lowFreqMinimumShouldMatch("2")).get(); + // cutoff frequency of 1 makes all terms high frequency so the query gets rewritten as a + // conjunction of all terms (the lowFreqMinimumShouldMatch parameter is effectively ignored) + searchResponse = client().prepareSearch() + .setQuery(commonTermsQuery("field1", "the huge fox").cutoffFrequency(1).lowFreqMinimumShouldMatch("2")) + .get(); + assertHitCount(searchResponse, 1L); + 
assertFirstHit(searchResponse, hasId("2")); + + // cutoff frequency of 100 makes all terms low frequency, so lowFreqMinimumShouldMatch=3 + // means all terms must match + searchResponse = client().prepareSearch() + .setQuery(commonTermsQuery("field1", "the huge fox").cutoffFrequency(100).lowFreqMinimumShouldMatch("3")) + .get(); assertHitCount(searchResponse, 1L); assertFirstHit(searchResponse, hasId("2")); @@ -465,6 +478,7 @@ public void testQueryStringAnalyzedWildcard() throws Exception { client().prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("value*")).get(); assertHitCount(searchResponse, 1L); @@ -482,11 +496,12 @@ public void testQueryStringAnalyzedWildcard() throws Exception { assertHitCount(searchResponse, 1L); } - public void testLowercaseExpandedTerms() { + public void testLowercaseExpandedTerms() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("VALUE_3~1")).get(); assertHitCount(searchResponse, 1L); @@ -499,7 +514,7 @@ public void testLowercaseExpandedTerms() { } // Issue #3540 - public void testDateRangeInQueryString() { + public void testDateRangeInQueryString() throws InterruptedException { // the mapping needs to be provided upfront otherwise we are not sure how many failures we get back // as with dynamic mappings some shards might be lacking behind and parse a different query assertAcked(prepareCreate("test").setMapping("past", "type=date", "future", "type=date")); @@ -510,6 +525,7 @@ public void testDateRangeInQueryString() { client().prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("past:[now-2M/d TO now/d]")).get(); assertHitCount(searchResponse, 1L); @@ -525,7 +541,7 @@ public void testDateRangeInQueryString() { } // Issue #7880 - public void testDateRangeInQueryStringWithTimeZone_7880() { + public void testDateRangeInQueryStringWithTimeZone_7880() throws InterruptedException { // the mapping needs to be provided upfront otherwise we are not sure how many failures we get back // as with dynamic mappings some shards might be lacking behind and parse a different query assertAcked(prepareCreate("test").setMapping("past", "type=date")); @@ -536,6 +552,7 @@ public void testDateRangeInQueryStringWithTimeZone_7880() { client().prepareIndex("test").setId("1").setSource("past", now).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(queryStringQuery("past:[now-1m/m TO now+1m/m]").timeZone(timeZone.getId())) .get(); @@ -543,7 +560,7 @@ public void testDateRangeInQueryStringWithTimeZone_7880() { } // Issue #10477 - public void testDateRangeInQueryStringWithTimeZone_10477() { + public void testDateRangeInQueryStringWithTimeZone_10477() throws InterruptedException { // the mapping needs to be provided upfront otherwise we are not sure how many failures we get back // as with dynamic mappings some shards might be lacking behind and parse a different query assertAcked(prepareCreate("test").setMapping("past", 
"type=date")); @@ -552,6 +569,7 @@ public void testDateRangeInQueryStringWithTimeZone_10477() { client().prepareIndex("test").setId("2").setSource("past", "2015-04-06T00:00:00+0000").get(); refresh(); + indexRandomForConcurrentSearch("test"); // Timezone set with dates SearchResponse searchResponse = client().prepareSearch() .setQuery(queryStringQuery("past:[2015-04-06T00:00:00+0200 TO 2015-04-06T23:00:00+0200]")) @@ -725,6 +743,7 @@ public void testPassQueryOrFilterAsJSONString() throws Exception { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", "value1_1", "field2", "value2_1").setRefreshPolicy(IMMEDIATE).get(); + indexRandomForConcurrentSearch("test"); WrapperQueryBuilder wrapper = new WrapperQueryBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }"); assertHitCount(client().prepareSearch().setQuery(wrapper).get(), 1L); @@ -741,6 +760,7 @@ public void testFiltersWithCustomCacheKey() throws Exception { client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))).get(); assertHitCount(searchResponse, 1L); @@ -782,6 +802,7 @@ public void testMatchQueryFuzzy() throws Exception { client().prepareIndex("test").setId("1").setSource("text", "Unit"), client().prepareIndex("test").setId("2").setSource("text", "Unity") ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness(Fuzziness.ZERO)).get(); assertHitCount(searchResponse, 0L); @@ -868,11 +889,12 @@ public void testMultiMatchQuery() throws Exception { assertFirstHit(searchResponse, hasId("1")); } - public void testMatchQueryZeroTermsQuery() { + public void testMatchQueryZeroTermsQuery() throws InterruptedException { assertAcked(prepareCreate("test").setMapping("field1", "type=text,analyzer=classic", "field2", "type=text,analyzer=classic")); client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); client().prepareIndex("test").setId("2").setSource("field1", "value2").get(); refresh(); + indexRandomForConcurrentSearch("test"); BoolQueryBuilder boolQuery = boolQuery().must(matchQuery("field1", "a").zeroTermsQuery(MatchQuery.ZeroTermsQuery.NONE)) .must(matchQuery("field1", "value1").zeroTermsQuery(MatchQuery.ZeroTermsQuery.NONE)); @@ -889,11 +911,12 @@ public void testMatchQueryZeroTermsQuery() { assertHitCount(searchResponse, 2L); } - public void testMultiMatchQueryZeroTermsQuery() { + public void testMultiMatchQueryZeroTermsQuery() throws InterruptedException { assertAcked(prepareCreate("test").setMapping("field1", "type=text,analyzer=classic", "field2", "type=text,analyzer=classic")); client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); client().prepareIndex("test").setId("2").setSource("field1", "value3", "field2", "value4").get(); refresh(); + indexRandomForConcurrentSearch("test"); BoolQueryBuilder boolQuery = boolQuery().must( multiMatchQuery("a", "field1", "field2").zeroTermsQuery(MatchQuery.ZeroTermsQuery.NONE) @@ -913,11 +936,12 @@ public void testMultiMatchQueryZeroTermsQuery() { assertHitCount(searchResponse, 2L); } - public void testMultiMatchQueryMinShouldMatch() { + public void testMultiMatchQueryMinShouldMatch() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", 
"value2", "value3" }).get(); client().prepareIndex("test").setId("2").setSource("field2", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); MultiMatchQueryBuilder multiMatchQuery = multiMatchQuery("value1 value2 foo", "field1", "field2"); @@ -959,12 +983,13 @@ public void testMultiMatchQueryMinShouldMatch() { assertHitCount(searchResponse, 0L); } - public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws IOException { + public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws IOException, InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", "value2", "value3" }).get(); client().prepareIndex("test").setId("2").setSource("field2", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); BoolQueryBuilder boolQuery = boolQuery().must(termQuery("field1", "value1")) .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)); SearchResponse searchResponse = client().prepareSearch().setQuery(boolQuery).get(); @@ -991,12 +1016,13 @@ public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws assertHitCount(searchResponse, 0L); } - public void testFuzzyQueryString() { + public void testFuzzyQueryString() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("str", "foobar", "date", "2012-02-01", "num", 12).get(); client().prepareIndex("test").setId("2").setSource("str", "fred", "date", "2012-02-05", "num", 20).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("str:foobaz~1")).get(); assertNoFailures(searchResponse); assertHitCount(searchResponse, 1L); @@ -1015,6 +1041,7 @@ public void testQuotedQueryStringWithBoost() throws InterruptedException { client().prepareIndex("test").setId("2").setSource("important", "nothing important", "less_important", "phrase match") ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(queryStringQuery("\"phrase match\"").field("important", boost).field("less_important")) .get(); @@ -1027,11 +1054,12 @@ public void testQuotedQueryStringWithBoost() throws InterruptedException { ); } - public void testSpecialRangeSyntaxInQueryString() { + public void testSpecialRangeSyntaxInQueryString() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("str", "foobar", "date", "2012-02-01", "num", 12).get(); client().prepareIndex("test").setId("2").setSource("str", "fred", "date", "2012-02-05", "num", 20).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("num:>19")).get(); assertHitCount(searchResponse, 1L); @@ -1137,6 +1165,7 @@ public void testTermsQuery() throws Exception { public void testTermsLookupFilter() throws Exception { assertAcked(prepareCreate("lookup").setMapping("terms", "type=text", "other", "type=text")); + indexRandomForConcurrentSearch("lookup"); assertAcked( prepareCreate("lookup2").setMapping( jsonBuilder().startObject() @@ -1152,8 +1181,11 @@ public void testTermsLookupFilter() throws Exception { .endObject() ) ); + indexRandomForConcurrentSearch("lookup2"); assertAcked(prepareCreate("lookup3").setMapping("_source", "enabled=false", "terms", "type=text")); + 
indexRandomForConcurrentSearch("lookup3"); assertAcked(prepareCreate("test").setMapping("term", "type=text")); + indexRandomForConcurrentSearch("test"); indexRandom( true, @@ -1279,6 +1311,7 @@ public void testBasicQueryById() throws Exception { client().prepareIndex("test").setId("3").setSource("field1", "value3").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "2")).get(); assertHitCount(searchResponse, 2L); assertThat(searchResponse.getHits().getHits().length, equalTo(2)); @@ -1333,6 +1366,7 @@ public void testNumericTermsAndRanges() throws Exception { .setSource("num_byte", 17, "num_short", 17, "num_integer", 17, "num_long", 17, "num_float", 17, "num_double", 17) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse; logger.info("--> term query on 1"); @@ -1439,6 +1473,7 @@ public void testNumericRangeFilter_2826() throws Exception { client().prepareIndex("test").setId("3").setSource("field1", "test2", "num_long", 3).get(); client().prepareIndex("test").setId("4").setSource("field1", "test2", "num_long", 4).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setPostFilter(boolQuery().should(rangeQuery("num_long").from(1).to(2)).should(rangeQuery("num_long").from(3).to(4))) @@ -1535,7 +1570,7 @@ public void testSimpleSpan() throws IOException, ExecutionException, Interrupted assertHitCount(searchResponse, 3L); } - public void testSpanMultiTermQuery() throws IOException { + public void testSpanMultiTermQuery() throws IOException, InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("description", "foo other anything bar", "count", 1).get(); @@ -1543,6 +1578,7 @@ public void testSpanMultiTermQuery() throws IOException { client().prepareIndex("test").setId("3").setSource("description", "foo other", "count", 3).get(); client().prepareIndex("test").setId("4").setSource("description", "fop", "count", 4).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(spanOrQuery(spanMultiTermQueryBuilder(fuzzyQuery("description", "fop")))) @@ -1574,6 +1610,7 @@ public void testSpanNot() throws IOException, ExecutionException, InterruptedExc client().prepareIndex("test").setId("1").setSource("description", "the quick brown fox jumped over the lazy dog").get(); client().prepareIndex("test").setId("2").setSource("description", "the quick black fox leaped over the sleeping dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery( @@ -1612,7 +1649,7 @@ public void testSpanNot() throws IOException, ExecutionException, InterruptedExc assertHitCount(searchResponse, 1L); } - public void testSimpleDFSQuery() throws IOException { + public void testSimpleDFSQuery() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -1657,6 +1694,7 @@ public void testSimpleDFSQuery() throws IOException { .setSource("online", true, "ts", System.currentTimeMillis() - 123123, "type", "bs") .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) @@ -1679,8 +1717,9 @@ public void testSimpleDFSQuery() throws IOException { assertNoFailures(response); } - public 
void testMultiFieldQueryString() { + public void testMultiFieldQueryString() throws InterruptedException { client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + indexRandomForConcurrentSearch("test"); logger.info("regular"); assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("value1").field("field1").field("field2")).get(), 1); @@ -1700,11 +1739,12 @@ public void testMultiFieldQueryString() { } // see #3797 - public void testMultiMatchLenientIssue3797() { + public void testMultiMatchLenientIssue3797() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", 123, "field2", "value2").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(multiMatchQuery("value2", "field2").field("field1", 2).lenient(true)) @@ -1728,6 +1768,7 @@ public void testMinScore() throws ExecutionException, InterruptedException { client().prepareIndex("test").setId("3").setSource("score", 2.0).get(); client().prepareIndex("test").setId("4").setSource("score", 0.5).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(functionScoreQuery(ScoreFunctionBuilders.fieldValueFactorFunction("score").missing(1.0)).setMinScore(1.5f)) @@ -1737,12 +1778,13 @@ public void testMinScore() throws ExecutionException, InterruptedException { assertSecondHit(searchResponse, hasId("1")); } - public void testQueryStringWithSlopAndFields() { + public void testQueryStringWithSlopAndFields() throws InterruptedException { assertAcked(prepareCreate("test")); client().prepareIndex("test").setId("1").setSource("desc", "one two three", "type", "customer").get(); client().prepareIndex("test").setId("2").setSource("desc", "one two three", "type", "product").get(); refresh(); + indexRandomForConcurrentSearch("test"); { SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")) @@ -1809,6 +1851,7 @@ public void testRangeQueryWithTimeZone() throws Exception { .setId("4") .setSource("date", Instant.now().atZone(ZoneOffset.ofHours(1)).toInstant().toEpochMilli(), "num", 4) ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00").to("2014-01-01T00:59:00")) @@ -1916,11 +1959,12 @@ public void testRangeQueryWithLocaleMapping() throws Exception { assertHitCount(searchResponse, 2L); } - public void testSearchEmptyDoc() { + public void testSearchEmptyDoc() throws InterruptedException { assertAcked(prepareCreate("test").setSettings("{\"index.analysis.analyzer.default.type\":\"keyword\"}", MediaTypeRegistry.JSON)); client().prepareIndex("test").setId("1").setSource("{}", MediaTypeRegistry.JSON).get(); refresh(); + indexRandomForConcurrentSearch("test"); assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); } @@ -1948,6 +1992,7 @@ public void testMatchPhrasePrefixQuery() throws ExecutionException, InterruptedE public void testQueryStringParserCache() throws Exception { createIndex("test"); indexRandom(true, false, client().prepareIndex("test").setId("1").setSource("nameTokens", "xyz")); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) @@ 
-1978,6 +2023,7 @@ public void testRangeQueryRangeFields_24744() throws Exception { .setSource(jsonBuilder().startObject().startObject("int_range").field("gte", 10).field("lte", 20).endObject().endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); RangeQueryBuilder range = new RangeQueryBuilder("int_range").relation("intersects").from(Integer.MIN_VALUE).to(Integer.MAX_VALUE); SearchResponse searchResponse = client().prepareSearch("test").setQuery(range).get(); @@ -2013,6 +2059,7 @@ public void testNestedQueryWithFieldAlias() throws Exception { index("index", "_doc", "1", source); refresh(); + indexRandomForConcurrentSearch("index"); QueryBuilder nestedQuery = QueryBuilders.nestedQuery( "section", @@ -2041,6 +2088,7 @@ public void testFieldAliasesForMetaFields() throws Exception { IndexRequestBuilder indexRequest = client().prepareIndex("test").setId("1").setRouting("custom").setSource("field", "value"); indexRandom(true, false, indexRequest); + indexRandomForConcurrentSearch("test"); client().admin() .cluster() .prepareUpdateSettings() @@ -2073,7 +2121,7 @@ public void testFieldAliasesForMetaFields() throws Exception { /** * Test that wildcard queries on keyword fields get normalized */ - public void testWildcardQueryNormalizationOnKeywordField() { + public void testWildcardQueryNormalizationOnKeywordField() throws InterruptedException { assertAcked( prepareCreate("test").setSettings( Settings.builder() @@ -2084,6 +2132,7 @@ public void testWildcardQueryNormalizationOnKeywordField() { ); client().prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get(); refresh(); + indexRandomForConcurrentSearch("test"); { WildcardQueryBuilder wildCardQuery = wildcardQuery("field1", "Bb*"); @@ -2099,7 +2148,7 @@ public void testWildcardQueryNormalizationOnKeywordField() { /** * Test that wildcard queries on text fields get normalized */ - public void testWildcardQueryNormalizationOnTextField() { + public void testWildcardQueryNormalizationOnTextField() throws InterruptedException { assertAcked( prepareCreate("test").setSettings( Settings.builder() @@ -2111,6 +2160,7 @@ public void testWildcardQueryNormalizationOnTextField() { ); client().prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get(); refresh(); + indexRandomForConcurrentSearch("test"); { // test default case insensitivity: false @@ -2130,10 +2180,11 @@ public void testWildcardQueryNormalizationOnTextField() { } /** tests wildcard case sensitivity */ - public void testWildcardCaseSensitivity() { + public void testWildcardCaseSensitivity() throws InterruptedException { assertAcked(prepareCreate("test").setMapping("field", "type=text")); client().prepareIndex("test").setId("1").setSource("field", "lowercase text").get(); refresh(); + indexRandomForConcurrentSearch("test"); // test case sensitive SearchResponse response = client().prepareSearch("test").setQuery(wildcardQuery("field", "Text").caseInsensitive(false)).get(); @@ -2151,7 +2202,7 @@ public void testWildcardCaseSensitivity() { * Reserved characters should be excluded when the normalization is applied for keyword fields. * See https://github.com/elastic/elasticsearch/issues/46300 for details. 
*/ - public void testWildcardQueryNormalizationKeywordSpecialCharacters() { + public void testWildcardQueryNormalizationKeywordSpecialCharacters() throws InterruptedException { assertAcked( prepareCreate("test").setSettings( Settings.builder() @@ -2163,6 +2214,7 @@ public void testWildcardQueryNormalizationKeywordSpecialCharacters() { ); client().prepareIndex("test").setId("1").setSource("field", "label-1").get(); refresh(); + indexRandomForConcurrentSearch("test"); WildcardQueryBuilder wildCardQuery = wildcardQuery("field", "la*"); SearchResponse searchResponse = client().prepareSearch().setQuery(wildCardQuery).get(); @@ -2213,11 +2265,12 @@ public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() { * set for fuzzy queries with "constant_score" rewrite nested inside a `span_multi` query and would cause NPEs due to an unset * {@link AttributeSource}. */ - public void testIssueFuzzyInsideSpanMulti() { + public void testIssueFuzzyInsideSpanMulti() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field", "foobarbaz").get(); ensureGreen(); refresh(); + indexRandomForConcurrentSearch("test"); BoolQueryBuilder query = boolQuery().filter(spanMultiTermQueryBuilder(fuzzyQuery("field", "foobarbiz").rewrite("constant_score"))); SearchResponse response = client().prepareSearch("test").setQuery(query).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java index 017d28ef3a2a6..d8902238005da 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java @@ -437,6 +437,7 @@ public void testSimpleQueryStringOnIndexMetaField() throws Exception { client().prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("test").field("_index")).get(); assertHitCount(searchResponse, 2L); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java index 34967528f2c4f..ae00904f237a5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java @@ -161,6 +161,7 @@ public void testCustomScriptBinaryField() throws Exception { .get(); flush(); refresh(); + indexRandomForConcurrentSearch("my-index"); SearchResponse response = client().prepareSearch() .setQuery( @@ -213,6 +214,7 @@ public void testCustomScriptBoost() throws Exception { .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("running doc['num1'].value > 1"); SearchResponse response = client().prepareSearch() @@ -259,7 +261,7 @@ public void testCustomScriptBoost() throws Exception { assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); } - public void testDisallowExpensiveQueries() { + public void testDisallowExpensiveQueries() throws InterruptedException { try { assertAcked(prepareCreate("test-index").setMapping("num1", "type=double")); int docCount = 10;
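Across this stretch of test hunks the recurring one-line change is the same: a call to indexRandomForConcurrentSearch(index) inserted right after refresh(). The intent is to leave each index with more than one live segment, so that when the concurrent-segment-search feature flag is enabled the query path genuinely fans out across segment slices instead of collapsing to a single-segment, effectively sequential, search. A minimal sketch of what such a helper can look like is below; it is written against the OpenSearchIntegTestCase client helpers, and `isConcurrentSegmentSearchEnabled`, the round count, and the dummy-document shape are illustrative assumptions rather than the actual implementation.

```java
// Sketch of a test-base helper: force extra segments so concurrent search has
// several slices to work on, without changing the test's visible hit counts.
public void indexRandomForConcurrentSearch(String... indices) throws InterruptedException {
    if (isConcurrentSegmentSearchEnabled() == false) { // assumed feature-flag check
        return;
    }
    for (String index : indices) {
        List<String> dummyIds = new ArrayList<>();
        for (int round = 0; round < 3; round++) {
            // Each index-then-refresh round leaves behind at least one new segment.
            for (int i = 0; i < randomIntBetween(1, 5); i++) {
                String id = "dummy-" + round + "-" + i;
                client().prepareIndex(index).setId(id).setSource("dummy", true).get();
                dummyIds.add(id);
            }
            refresh(index);
        }
        // Delete the throwaway docs again; the surrounding test's assertions are unaffected.
        for (String id : dummyIds) {
            client().prepareDelete(index, id).get();
        }
        refresh(index);
    }
}
```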
@@ -267,6 +269,7 @@ public void testDisallowExpensiveQueries() { client().prepareIndex("test-index").setId("" + i).setSource("num1", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); // Execute with search.allow_expensive_queries = null => default value = false => success Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > 1", Collections.emptyMap()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java index 0eee136acac69..b82048ffc924e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java @@ -135,6 +135,7 @@ public void testSimpleScrollQueryThenFetch() throws Exception { } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -188,6 +189,7 @@ public void testSimpleScrollQueryThenFetchSmallSizeUnevenDistribution() throws E } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setSearchType(SearchType.QUERY_THEN_FETCH) @@ -256,6 +258,7 @@ public void testScrollAndUpdateIndex() throws Exception { } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); assertThat(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get().getHits().getTotalHits().value, equalTo(500L)); assertThat( @@ -328,6 +331,7 @@ public void testSimpleScrollQueryThenFetch_clearScrollIds() throws Exception { } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse1 = client().prepareSearch() .setQuery(matchAllQuery()) @@ -448,6 +452,7 @@ public void testSimpleScrollQueryThenFetchClearAllScrollIds() throws Exception { } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse1 = client().prepareSearch() .setQuery(matchAllQuery()) @@ -526,6 +531,7 @@ public void testDeepScrollingDoesNotBlowUp() throws Exception { .prepareUpdateSettings("index") .setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)) .get(); + indexRandomForConcurrentSearch("index"); for (SearchType searchType : SearchType.values()) { SearchRequestBuilder builder = client().prepareSearch("index") @@ -567,6 +573,7 @@ public void testStringSortMissingAscTerminates() throws Exception { ); client().prepareIndex("test").setId("1").setSource("some_field", "test").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .addSort(new FieldSortBuilder("no_field").order(SortOrder.ASC).missing("_last")) @@ -592,12 +599,13 @@ public void testStringSortMissingAscTerminates() throws Exception { assertThat(response.getHits().getHits().length, equalTo(0)); } - public void testCloseAndReopenOrDeleteWithActiveScroll() { + public void testCloseAndReopenOrDeleteWithActiveScroll() throws InterruptedException { createIndex("test"); for (int i = 0; i < 100; i++) { client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", i).get(); } refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = 
client().prepareSearch() .setQuery(matchAllQuery()) .setSize(35) @@ -683,7 +691,7 @@ public void testScrollInvalidDefaultKeepAlive() throws IOException { assertThat(exc.getMessage(), containsString("was (1m > 30s)")); } - public void testInvalidScrollKeepAlive() throws IOException { + public void testInvalidScrollKeepAlive() throws IOException, InterruptedException { createIndex("test"); for (int i = 0; i < 2; i++) { client().prepareIndex("test") @@ -692,6 +700,7 @@ public void testInvalidScrollKeepAlive() throws IOException { .get(); } refresh(); + indexRandomForConcurrentSearch("test"); assertAcked( client().admin() .cluster() @@ -733,7 +742,7 @@ public void testInvalidScrollKeepAlive() throws IOException { * Ensures that we always create and retain search contexts on every target shards for a scroll request * regardless whether that query can be written to match_no_docs on some target shards or not. */ - public void testScrollRewrittenToMatchNoDocs() { + public void testScrollRewrittenToMatchNoDocs() throws InterruptedException { final int numShards = randomIntBetween(3, 5); assertAcked( client().admin() @@ -746,6 +755,7 @@ public void testScrollRewrittenToMatchNoDocs() { client().prepareIndex("test").setId("2").setSource("created_date", "2020-01-02").get(); client().prepareIndex("test").setId("3").setSource("created_date", "2020-01-03").get(); client().admin().indices().prepareRefresh("test").get(); + indexRandomForConcurrentSearch("test"); SearchResponse resp = null; try { int totalHits = 0; @@ -793,6 +803,7 @@ public void testRestartDataNodesDuringScrollSearch() throws Exception { index("prod", "_doc", "prod-" + i, Collections.emptyMap()); } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("demo", "prod"); SearchResponse respFromDemoIndex = client().prepareSearch("demo") .setSize(randomIntBetween(1, 10)) .setQuery(new MatchAllQueryBuilder()) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index bee242b933dfd..81e948640ee94 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -154,7 +154,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, CustomScriptPlugin.class); } - public void testIssue8226() { + public void testIssue8226() throws InterruptedException { int numIndices = between(5, 10); final boolean useMapping = randomBoolean(); for (int i = 0; i < numIndices; i++) { @@ -168,6 +168,9 @@ public void testIssue8226() { } } refresh(); + for (int i = 0; i < numIndices; i++) { + indexRandomForConcurrentSearch("test_" + i); + } // sort DESC SearchResponse searchResponse = client().prepareSearch() .addSort(new FieldSortBuilder("entry").order(SortOrder.DESC).unmappedType(useMapping ?
null : "long")) @@ -278,6 +281,7 @@ public void testTrackScores() throws Exception { jsonBuilder().startObject().field("id", "2").field("svalue", "bbb").field("ivalue", 200).field("dvalue", 0.2).endObject() ); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC).get(); @@ -446,6 +450,7 @@ public void testScoreSortDirection() throws Exception { client().prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field"))) @@ -484,6 +489,7 @@ public void testScoreSortDirectionWithFunctionScore() throws Exception { client().prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field"))) @@ -531,7 +537,7 @@ public void testIssue2986() { } } - public void testIssue2991() { + public void testIssue2991() throws InterruptedException { for (int i = 1; i < 4; i++) { try { client().admin().indices().prepareDelete("test").get(); @@ -552,6 +558,7 @@ public void testIssue2991() { client().prepareIndex("test").setId("2").setSource("tag", "beta").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test") .setSize(2) .setQuery(matchAllQuery()) @@ -605,6 +612,9 @@ public void testSimpleSorts() throws Exception { .startObject("float_value") .field("type", "float") .endObject() + .startObject("half_float_value") + .field("type", "half_float") + .endObject() .startObject("double_value") .field("type", "double") .endObject() @@ -628,6 +638,7 @@ public void testSimpleSorts() throws Exception { .field("long_value", i) .field("unsigned_long_value", UNSIGNED_LONG_BASE.add(BigInteger.valueOf(10000 * i))) .field("float_value", 0.1 * i) + .field("half_float_value", 0.1 * i) .field("double_value", 0.1 * i) .endObject() ); @@ -646,6 +657,7 @@ public void testSimpleSorts() throws Exception { } refresh(); + indexRandomForConcurrentSearch("test"); // STRING int size = 1 + random.nextInt(10); @@ -794,6 +806,28 @@ public void testSimpleSorts() throws Exception { assertThat(searchResponse.toString(), not(containsString("error"))); + // HALF_FLOAT + size = 1 + random.nextInt(10); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("half_float_value", SortOrder.ASC).get(); + + assertHitCount(searchResponse, 10L); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + size = 1 + random.nextInt(10); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("half_float_value", SortOrder.DESC).get(); + + assertHitCount(searchResponse, 10); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + // DOUBLE size = 1 + random.nextInt(10); searchResponse 
= client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.ASC).get(); @@ -896,6 +930,7 @@ public void testSortMissingNumbers() throws Exception { flush(); refresh(); + indexRandomForConcurrentSearch("test"); // DOUBLE logger.info("--> sort with no missing (same as missing _last)"); @@ -1057,6 +1092,7 @@ public void testSortMissingNumbersMinMax() throws Exception { flush(); refresh(); + indexRandomForConcurrentSearch("test"); // LONG logger.info("--> sort with no missing (same as missing _last)"); @@ -1176,7 +1212,7 @@ public void testSortMissingNumbersMinMax() throws Exception { assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); } - public void testSortMissingStrings() throws IOException { + public void testSortMissingStrings() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( XContentFactory.jsonBuilder() @@ -1204,6 +1240,7 @@ public void testSortMissingStrings() throws IOException { flush(); refresh(); + indexRandomForConcurrentSearch("test"); // TODO: WTF? try { @@ -1330,6 +1367,9 @@ public void testSortMVField() throws Exception { .startObject("float_values") .field("type", "float") .endObject() + .startObject("half_float_values") + .field("type", "half_float") + .endObject() .startObject("double_values") .field("type", "double") .endObject() @@ -1351,6 +1391,7 @@ public void testSortMVField() throws Exception { .array("short_values", 1, 5, 10, 8) .array("byte_values", 1, 5, 10, 8) .array("float_values", 1f, 5f, 10f, 8f) + .array("half_float_values", 1f, 5f, 10f, 8f) .array("double_values", 1d, 5d, 10d, 8d) .array("string_values", "01", "05", "10", "08") .endObject() @@ -1365,6 +1406,7 @@ .array("short_values", 11, 15, 20, 7) .array("byte_values", 11, 15, 20, 7) .array("float_values", 11f, 15f, 20f, 7f) + .array("half_float_values", 11f, 15f, 20f, 7f) .array("double_values", 11d, 15d, 20d, 7d) .array("string_values", "11", "15", "20", "07") .endObject() @@ -1379,6 +1421,7 @@ .array("short_values", 2, 1, 3, -4) .array("byte_values", 2, 1, 3, -4) .array("float_values", 2f, 1f, 3f, -4f) + .array("half_float_values", 2f, 1f, 3f, -4f) .array("double_values", 2d, 1d, 3d, -4d) .array("string_values", "02", "01", "03", "!4") .endObject() @@ -1386,6 +1429,7 @@ .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1585,6 +1629,34 @@ public void testSortMVField() throws Exception { assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(3f)); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("half_float_values", SortOrder.ASC).get(); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(-4f)); + + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(1f)); + +
assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(7f)); + + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("half_float_values", SortOrder.DESC).get(); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(20f)); + + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(10f)); + + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(3f)); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.ASC).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); @@ -1642,7 +1714,7 @@ public void testSortMVField() throws Exception { assertThat(searchResponse.getHits().getAt(2).getSortValues()[0], equalTo("03")); } - public void testSortOnRareField() throws IOException { + public void testSortOnRareField() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( XContentFactory.jsonBuilder() @@ -1662,6 +1734,7 @@ public void testSortOnRareField() throws IOException { .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) .setSize(3) @@ -1757,6 +1830,7 @@ public void testSortMetaField() throws Exception { indexReqs[i] = client().prepareIndex("test").setId(Integer.toString(i)).setSource(); } indexRandom(true, indexReqs); + indexRandomForConcurrentSearch("test"); SortOrder order = randomFrom(SortOrder.values()); SearchResponse searchResponse = client().prepareSearch() @@ -1859,6 +1933,7 @@ public void testNestedSort() throws IOException, InterruptedException, Execution ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); // We sort on nested field SearchResponse searchResponse = client().prepareSearch() @@ -2044,6 +2119,7 @@ public void testFieldAlias() throws Exception { builders.add(client().prepareIndex("old_index").setSource("distance", 50.5)); builders.add(client().prepareIndex("new_index").setSource("route_length_miles", 100.2)); indexRandom(true, true, builders); + indexRandomForConcurrentSearch("old_index", "new_index"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) @@ -2070,6 +2146,7 @@ public void testFieldAliasesWithMissingValues() throws Exception { builders.add(client().prepareIndex("old_index").setSource(Collections.emptyMap())); builders.add(client().prepareIndex("new_index").setSource("route_length_miles", 100.2)); indexRandom(true, true, builders); + indexRandomForConcurrentSearch("old_index", "new_index"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) @@ -2139,6 +2216,7 @@ public void testCastDate() throws Exception { builders.add(client().prepareIndex("index_date").setSource("field", "2024-04-11T23:47:17")); builders.add(client().prepareIndex("index_date_nanos").setSource("field", 
"2024-04-11T23:47:16.854775807Z")); indexRandom(true, true, builders); + indexRandomForConcurrentSearch("index_date", "index_date_nanos"); { SearchResponse response = client().prepareSearch() @@ -2264,7 +2342,7 @@ public void testCastNumericTypeExceptions() throws Exception { } } - public void testLongSortOptimizationCorrectResults() { + public void testLongSortOptimizationCorrectResults() throws InterruptedException { assertAcked( prepareCreate("test1").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)) .setMapping("long_field", "type=long") @@ -2281,6 +2359,7 @@ public void testLongSortOptimizationCorrectResults() { bulkBuilder.add(client().prepareIndex("test1").setId(Integer.toString(i)).setSource(source, MediaTypeRegistry.JSON)); } refresh(); + indexRandomForConcurrentSearch("test1"); // *** 1. sort DESC on long_field SearchResponse searchResponse = client().prepareSearch() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java index 6886f8d67589e..766ac6139b24b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java @@ -192,6 +192,7 @@ public void testDistanceSortingMVFields() throws Exception { .get(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); // Order: Asc SearchResponse searchResponse = client().prepareSearch("test") @@ -324,6 +325,7 @@ public void testDistanceSortingWithMissingGeoPoint() throws Exception { .get(); refresh(); + indexRandomForConcurrentSearch("test"); // Order: Asc SearchResponse searchResponse = client().prepareSearch("test") @@ -602,7 +604,7 @@ public void testDistanceSortingNestedFields() throws Exception { /** * Issue 3073 */ - public void testGeoDistanceFilter() throws IOException { + public void testGeoDistanceFilter() throws IOException, InterruptedException { Version version = VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); double lat = 40.720611; @@ -620,6 +622,7 @@ public void testGeoDistanceFilter() throws IOException { assertAcked(prepareCreate("locations").setSettings(settings).setMapping(mapping)); client().prepareIndex("locations").setId("1").setCreate(true).setSource(source).get(); refresh(); + indexRandomForConcurrentSearch("locations"); client().prepareGet("locations", "1").get(); SearchResponse result = client().prepareSearch("locations") @@ -668,6 +671,7 @@ public void testDistanceSortingWithUnmappedField() throws Exception { .get(); refresh(); + indexRandomForConcurrentSearch("test1", "test2"); // Order: Asc SearchResponse searchResponse = client().prepareSearch("test1", "test2") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java index 7bcded86fcaa8..76e68781c72ba 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java @@ -59,6 +59,7 @@ public void testPluginSort() throws Exception { client().prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = 
client().prepareSearch("test").addSort(new CustomSortBuilder("field", SortOrder.ASC)).get(); assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("3")); @@ -80,6 +81,7 @@ public void testPluginSortXContent() throws Exception { client().prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); + indexRandomForConcurrentSearch("test"); // builder -> json -> builder SearchResponse searchResponse = client().prepareSearch("test") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java index c98a38ea0bb97..a9c4bf841d9a1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java @@ -76,12 +76,13 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); } - public void testSimple() { + public void testSimple() throws InterruptedException { assertAcked(prepareCreate("test")); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").storedFields("_none_").setFetchSource(false).setVersion(true).get(); assertThat(response.getHits().getAt(0).getId(), nullValue()); @@ -93,12 +94,12 @@ public void testSimple() { assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); } - public void testInnerHits() { + public void testInnerHits() throws InterruptedException { assertAcked(prepareCreate("test").setMapping("nested", "type=nested")); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value", "nested", Collections.singletonMap("title", "foo")).get(); refresh(); - + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .storedFields("_none_") .setFetchSource(false) @@ -119,12 +120,13 @@ public void testInnerHits() { assertThat(hits.getAt(0).getSourceAsString(), nullValue()); } - public void testWithRouting() { + public void testWithRouting() throws InterruptedException { assertAcked(prepareCreate("test")); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value").setRouting("toto").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").storedFields("_none_").setFetchSource(false).get(); assertThat(response.getHits().getAt(0).getId(), nullValue()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java index eeef5403fe898..805e82dc9850b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java @@ -66,12 +66,13 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); } - public void testSourceDefaultBehavior() { + public void testSourceDefaultBehavior() throws InterruptedException { createIndex("test"); ensureGreen(); index("test", "type1", "1", "field", "value"); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse 
response = client().prepareSearch("test").get(); assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); @@ -84,12 +85,13 @@ public void testSourceDefaultBehavior() { } - public void testSourceFiltering() { + public void testSourceFiltering() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field1", "value", "field2", "value2").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").setFetchSource(false).get(); assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); @@ -117,12 +119,13 @@ public void testSourceFiltering() { * Test Case for #5132: Source filtering with wildcards broken when given multiple patterns * https://github.com/elastic/elasticsearch/issues/5132 */ - public void testSourceWithWildcardFiltering() { + public void testSourceWithWildcardFiltering() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").setFetchSource(new String[] { "*.notexisting", "field" }, null).get(); assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java index 32bb0e34054bb..bc6e49008bf6b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java @@ -287,6 +287,7 @@ public void testSizeOneShard() throws Exception { index("test", "type1", Integer.toString(i), "text", "abc" + i); } refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "spellchecker")).get(); assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue()); @@ -369,6 +370,7 @@ public void testSimple() throws Exception { index("test", "type1", "3", "text", "abbd"); index("test", "type1", "4", "text", "abcc"); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "spellcecker")).get(); assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue()); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 4478a3432e519..9a92ddc81852a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -458,7 +458,9 @@ private void testUpdateIndexSettingsOnlyNotAllowedSettings(String index) { private void testUpdateIndexSettingsOnlyAllowedSettings(String index) { final UpdateSettingsRequestBuilder builder = client().admin().indices().prepareUpdateSettings(index); - builder.setSettings(Map.of("index.max_result_window", 1000, "index.search.slowlog.threshold.query.warn", "10s")); + builder.setSettings( + Map.of("index.max_result_window", 1000, "index.search.slowlog.threshold.query.warn", "10s", "index.number_of_replicas", 0) + ); AcknowledgedResponse settingsResponse = 
builder.execute().actionGet(); assertThat(settingsResponse, notNullValue()); } diff --git a/server/src/main/java/org/opensearch/action/AliasesRequest.java b/server/src/main/java/org/opensearch/action/AliasesRequest.java index 3632ba2d7304f..8fe6f49c08af0 100644 --- a/server/src/main/java/org/opensearch/action/AliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/AliasesRequest.java @@ -32,14 +32,17 @@ package org.opensearch.action; +import org.opensearch.common.annotation.PublicApi; + /** * Needs to be implemented by all {@link org.opensearch.action.ActionRequest} subclasses that relate to * one or more indices and one or more aliases. Meant to be used for aliases management requests (e.g. add/remove alias, * get aliases) that hold aliases and indices in separate fields. * Allows to retrieve which indices and aliases the action relates to. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface AliasesRequest extends IndicesRequest.Replaceable { /** diff --git a/server/src/main/java/org/opensearch/action/OriginalIndices.java b/server/src/main/java/org/opensearch/action/OriginalIndices.java index 1e24c64bc60fc..1c26bf9d416f5 100644 --- a/server/src/main/java/org/opensearch/action/OriginalIndices.java +++ b/server/src/main/java/org/opensearch/action/OriginalIndices.java @@ -33,6 +33,7 @@ package org.opensearch.action; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -42,8 +43,9 @@ /** * Used to keep track of original indices within internal (e.g. shard level) requests * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class OriginalIndices implements IndicesRequest { // constant to use when original indices are not applicable and will not be serialized across the wire diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java index f4705a21f5014..3c8f07613561d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.routing.allocation.AllocationDecision; import org.opensearch.cluster.routing.allocation.ShardAllocationDecision; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -58,8 +59,9 @@ * or if it is not unassigned, then which nodes it could possibly be relocated to. * It is an immutable class. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ClusterAllocationExplanation implements ToXContentObject, Writeable { private final ShardRouting shardRouting; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index 874713b51d627..8293a5bb27612 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -58,6 +58,7 @@ import org.opensearch.monitor.process.ProcessStats; import org.opensearch.node.AdaptiveSelectionStats; import org.opensearch.node.NodesResourceUsageStats; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControlStats; import org.opensearch.repositories.RepositoriesStats; import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; @@ -154,6 +155,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private RepositoriesStats repositoriesStats; + @Nullable + private AdmissionControlStats admissionControlStats; + public NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -225,6 +229,12 @@ public NodeStats(StreamInput in) throws IOException { } else { repositoriesStats = null; } + // TODO: change to V_2_12_0 on main after backport to 2.x + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + admissionControlStats = in.readOptionalWriteable(AdmissionControlStats::new); + } else { + admissionControlStats = null; + } } public NodeStats( @@ -254,7 +264,8 @@ public NodeStats( @Nullable TaskCancellationStats taskCancellationStats, @Nullable SearchPipelineStats searchPipelineStats, @Nullable SegmentReplicationRejectionStats segmentReplicationRejectionStats, - @Nullable RepositoriesStats repositoriesStats + @Nullable RepositoriesStats repositoriesStats, + @Nullable AdmissionControlStats admissionControlStats ) { super(node); this.timestamp = timestamp; @@ -283,6 +294,7 @@ public NodeStats( this.searchPipelineStats = searchPipelineStats; this.segmentReplicationRejectionStats = segmentReplicationRejectionStats; this.repositoriesStats = repositoriesStats; + this.admissionControlStats = admissionControlStats; } public long getTimestamp() { @@ -435,6 +447,11 @@ public RepositoriesStats getRepositoriesStats() { return repositoriesStats; } + @Nullable + public AdmissionControlStats getAdmissionControlStats() { + return admissionControlStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -487,6 +504,10 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(repositoriesStats); } + // TODO: change to V_2_12_0 on main after backport to 2.x + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalWriteable(admissionControlStats); + } } @Override @@ -587,6 +608,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (getRepositoriesStats() != null) { getRepositoriesStats().toXContent(builder, params); } + if (getAdmissionControlStats() != null) { + getAdmissionControlStats().toXContent(builder, params); + } return builder; } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 22a667e7e8f6f..1af56f10b95ee 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -218,7 +218,8 @@ public enum Metric { SEARCH_PIPELINE("search_pipeline"), RESOURCE_USAGE_STATS("resource_usage_stats"), SEGMENT_REPLICATION_BACKPRESSURE("segment_replication_backpressure"), - REPOSITORIES("repositories"); + REPOSITORIES("repositories"), + ADMISSION_CONTROL("admission_control"); private String metricName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 99cf42cfdc4d0..1df73d3b4394d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -127,7 +127,8 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics), NodesStatsRequest.Metric.RESOURCE_USAGE_STATS.containedIn(metrics), NodesStatsRequest.Metric.SEGMENT_REPLICATION_BACKPRESSURE.containedIn(metrics), - NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics) + NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics), + NodesStatsRequest.Metric.ADMISSION_CONTROL.containedIn(metrics) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 0b101070db478..e0f380b3ebbe6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.SnapshotsInProgress.State; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -68,8 +69,9 @@ /** * Status of a snapshot * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotStatus implements ToXContentObject, Writeable { private final Snapshot snapshot; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 5efec8b876435..9c5dcc9e9de3f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -171,6 +171,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq false, false, false, + false, false ); List<ShardStats> shardsStats = new ArrayList<>();
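Taken together, the hunks above wire the new admission_control metric end to end: NodesStatsRequest.Metric gains the constant, TransportNodesStatsAction forwards the flag into the node-level stats collection, and the NodeStats payload earlier in this patch carries the resulting object behind a version check. A hedged sketch of a caller is below; it assumes the existing clear()/addMetric(String) helpers on NodesStatsRequest, and the null check is required because nodes on a pre-upgrade version never serialize the new section.

```java
// Request only the new metric and read it defensively; getAdmissionControlStats()
// is @Nullable because older nodes in a mixed-version cluster omit it on the wire.
NodesStatsRequest request = new NodesStatsRequest().clear().addMetric("admission_control");
NodesStatsResponse response = client().admin().cluster().nodesStats(request).actionGet();
for (NodeStats nodeStats : response.getNodes()) {
    AdmissionControlStats acStats = nodeStats.getAdmissionControlStats();
    if (acStats != null) {
        // inspect or aggregate per-controller rejection counts here
    }
}
```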
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java index 475e44e1820d5..75c68350e2204 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -52,8 +53,9 @@ /** * Class for holding Rollover related information within an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RolloverInfo extends AbstractDiffable<RolloverInfo> implements Writeable, ToXContentFragment { public static final ParseField CONDITION_FIELD = new ParseField("met_conditions"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 1c57dc27df8c6..14c985f1d3427 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -55,6 +55,7 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.Arrays; import java.util.Set; import java.util.stream.Stream; @@ -77,7 +78,8 @@ public class TransportUpdateSettingsAction extends TransportClusterManagerNodeAc "index.max_script_fields", "index.max_terms_count", "index.max_regex_length", - "index.highlight.max_analyzed_offset" + "index.highlight.max_analyzed_offset", + "index.number_of_replicas" ); private final static String[] ALLOWLIST_REMOTE_SNAPSHOT_SETTINGS_PREFIXES = { "index.search.slowlog" }; @@ -145,10 +147,10 @@ protected ClusterBlockException checkBlock(UpdateSettingsRequest request, Cluste } } + final String[] requestIndexNames = Arrays.stream(requestIndices).map(Index::getName).toArray(String[]::new); return allowSearchableSnapshotSettingsUpdate ?
null - : state.blocks() .indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request)); + : state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, requestIndexNames); } @Override diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java b/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java index 896456089ee3e..4e770f5851bc6 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java @@ -232,6 +232,7 @@ public void resetForExecutionForRetry() { currentItemState = ItemProcessingState.INITIAL; requestToExecute = null; executionResult = null; + retryCounter++; assertInvariants(ItemProcessingState.INITIAL); } diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index 268a6ed6f85b8..b8517f53ff294 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -98,6 +98,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.node.NodeClosedException; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.Task; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; @@ -180,7 +181,8 @@ public TransportShardBulkAction( false, indexingPressureService, systemIndices, - tracer + tracer, + AdmissionControlActionType.INDEXING ); this.updateHelper = updateHelper; this.mappingUpdatedAction = mappingUpdatedAction; diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 14f57218ae1dc..f18bbb8a1cc13 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -65,10 +65,10 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; @@ -115,13 +115,12 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten private final int maxConcurrentRequestsPerNode; private final Map<String, PendingExecutions> pendingExecutionsPerNode = new ConcurrentHashMap<>(); private final boolean throttleConcurrentRequests; + private final SearchRequestContext searchRequestContext; private SearchPhase currentPhase; private final List<Releasable> releasables = new ArrayList<>(); - private Optional<SearchRequestOperationsListener> searchRequestOperationsListener; - AbstractSearchAsyncAction( String name, Logger logger, @@ -140,7 +139,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten SearchPhaseResults<Result> resultConsumer, int maxConcurrentRequestsPerNode, SearchResponse.Clusters clusters, - SearchRequestOperationsListener searchRequestOperationsListener + SearchRequestContext searchRequestContext ) { super(name); final List<SearchShardIterator> toSkipIterators = new ArrayList<>(); @@ -176,7 +175,7 @@ abstract class
AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten this.indexRoutings = indexRoutings; this.results = resultConsumer; this.clusters = clusters; - this.searchRequestOperationsListener = Optional.ofNullable(searchRequestOperationsListener); + this.searchRequestContext = searchRequestContext; } @Override @@ -427,18 +426,26 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha clusterState.version() ); } - onPhaseEnd(); + onPhaseEnd(searchRequestContext); executePhase(nextPhase); } } - private void onPhaseEnd() { - this.searchRequestOperationsListener.ifPresent(searchRequestOperations -> { searchRequestOperations.onPhaseEnd(this); }); + private void onPhaseEnd(SearchRequestContext searchRequestContext) { + if (getCurrentPhase() != null) { + long tookInNanos = System.nanoTime() - getCurrentPhase().getStartTimeInNanos(); + searchRequestContext.updatePhaseTookMap(getCurrentPhase().getName(), TimeUnit.NANOSECONDS.toMillis(tookInNanos)); + } + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseEnd(this, searchRequestContext); } private void onPhaseStart(SearchPhase phase) { setCurrentPhase(phase); - this.searchRequestOperationsListener.ifPresent(searchRequestOperations -> { searchRequestOperations.onPhaseStart(this); }); + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseStart(this); + } + + private void onRequestEnd(SearchRequestContext searchRequestContext) { + this.searchRequestContext.getSearchRequestOperationsListener().onRequestEnd(this, searchRequestContext); } private void executePhase(SearchPhase phase) { @@ -696,15 +703,18 @@ public void sendSearchResponse(InternalSearchResponse internalSearchResponse, At searchContextId = null; } } + searchRequestContext.setTotalHits(internalSearchResponse.hits().getTotalHits()); + searchRequestContext.setShardStats(results.getNumShards(), successfulOps.get(), skippedOps.get(), failures.length); + onPhaseEnd(searchRequestContext); + onRequestEnd(searchRequestContext); listener.onResponse(buildSearchResponse(internalSearchResponse, failures, scrollId, searchContextId)); } - onPhaseEnd(); setCurrentPhase(null); } @Override public final void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { - this.searchRequestOperationsListener.ifPresent(searchRequestOperations -> searchRequestOperations.onPhaseFailure(this)); + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseFailure(this); raisePhaseFailure(new SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures())); } diff --git a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java index ae481736ad0aa..c693eea4a2c33 100644 --- a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java @@ -91,7 +91,7 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<SearchService.CanMatchResponse> Function<GroupShardsIterator<SearchShardIterator>, SearchPhase> phaseFactory, SearchResponse.Clusters clusters, - SearchRequestOperationsListener searchRequestOperationsListener + SearchRequestContext searchRequestContext ) { // We set max concurrent shard requests to the number of shards so no throttling happens for can_match requests super( @@ -112,7 +112,7 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<SearchService.CanMatchResponse> new CanMatchSearchPhaseResults(shardsIts.size()), request.getMaxConcurrentShardRequests(), clusters, - searchRequestOperationsListener +
searchRequestContext ); this.queryPhaseResultConsumer = queryPhaseResultConsumer; this.searchPhaseController = searchPhaseController; diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhase.java b/server/src/main/java/org/opensearch/action/search/SearchPhase.java index 1c7b3c1f1563c..0890e9f5de8d4 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhase.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; import java.util.Locale; @@ -40,8 +41,9 @@ /** * Base class for all individual search phases like collecting distributed frequencies, fetching documents, querying shards. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class SearchPhase implements CheckedRunnable<IOException> { private final String name; private long startTimeInNanos; diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java index 45d39a6f85ea2..df451e0745e3c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java @@ -34,6 +34,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.OriginalIndices; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.search.SearchPhaseResult; @@ -50,6 +51,7 @@ * * @opensearch.internal */ +@InternalApi public interface SearchPhaseContext extends Executor { // TODO maybe we can make this concrete later - for now we just implement this in the base class for all initial phases diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java index ca5ad087d3089..c26bd5eef8c15 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -82,7 +82,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<SearchPhaseResult> + private final Map<String, Long> phaseTookMap; + private TotalHits totalHits; + private final EnumMap<ShardStatsFieldNames, Integer> shardStats; + + /** + * This constructor is for testing only + */ + SearchRequestContext() { + this(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger())); + } + + SearchRequestContext(SearchRequestOperationsListener searchRequestOperationsListener) { + this.searchRequestOperationsListener = searchRequestOperationsListener; + this.absoluteStartNanos = System.nanoTime(); + this.phaseTookMap = new HashMap<>(); + this.shardStats = new EnumMap<>(ShardStatsFieldNames.class); + } + + SearchRequestOperationsListener getSearchRequestOperationsListener() { + return searchRequestOperationsListener; + } + + void updatePhaseTookMap(String phaseName, Long tookTime) { + this.phaseTookMap.put(phaseName, tookTime); + } + + Map<String, Long> phaseTookMap() { + return phaseTookMap; + } + + /** + * Override absoluteStartNanos set in constructor.
+ * For testing only + */ + void setAbsoluteStartNanos(long absoluteStartNanos) { + this.absoluteStartNanos = absoluteStartNanos; + } + + /** + * Request start time in nanos + */ + long getAbsoluteStartNanos() { + return absoluteStartNanos; + } + + void setTotalHits(TotalHits totalHits) { + this.totalHits = totalHits; + } + + TotalHits totalHits() { + return totalHits; + } + + void setShardStats(int total, int successful, int skipped, int failed) { + this.shardStats.put(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_TOTAL, total); + this.shardStats.put(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SUCCESSFUL, successful); + this.shardStats.put(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SKIPPED, skipped); + this.shardStats.put(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_FAILED, failed); + } + + String formattedShardStats() { + if (shardStats.isEmpty()) { + return ""; + } else { + return String.format( + Locale.ROOT, + "{%s:%s, %s:%s, %s:%s, %s:%s}", + ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_TOTAL.toString(), + shardStats.get(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_TOTAL), + ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SUCCESSFUL.toString(), + shardStats.get(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SUCCESSFUL), + ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SKIPPED.toString(), + shardStats.get(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SKIPPED), + ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_FAILED.toString(), + shardStats.get(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_FAILED) + ); + } + } +} + +enum ShardStatsFieldNames { + SEARCH_REQUEST_SLOWLOG_SHARD_TOTAL("total"), + SEARCH_REQUEST_SLOWLOG_SHARD_SUCCESSFUL("successful"), + SEARCH_REQUEST_SLOWLOG_SHARD_SKIPPED("skipped"), + SEARCH_REQUEST_SLOWLOG_SHARD_FAILED("failed"); + + private final String name; + + ShardStatsFieldNames(String name) { + this.name = name; + } + + @Override + public String toString() { + return this.name; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java index 89d725b56bded..19ce0beb3c493 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.annotation.InternalApi; import java.util.List; @@ -18,13 +19,18 @@ * * @opensearch.internal */ -public interface SearchRequestOperationsListener { +@InternalApi +abstract class SearchRequestOperationsListener { - void onPhaseStart(SearchPhaseContext context); + abstract void onPhaseStart(SearchPhaseContext context); - void onPhaseEnd(SearchPhaseContext context); + abstract void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext); - void onPhaseFailure(SearchPhaseContext context); + abstract void onPhaseFailure(SearchPhaseContext context); + + void onRequestStart(SearchRequestContext searchRequestContext) {} + + void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} /** * Holder of Composite Listeners @@ -32,17 +38,17 @@ public interface SearchRequestOperationsListener { * @opensearch.internal */ - final class CompositeListener implements SearchRequestOperationsListener { + static final class CompositeListener 
extends SearchRequestOperationsListener { private final List listeners; private final Logger logger; - public CompositeListener(List listeners, Logger logger) { + CompositeListener(List listeners, Logger logger) { this.listeners = listeners; this.logger = logger; } @Override - public void onPhaseStart(SearchPhaseContext context) { + void onPhaseStart(SearchPhaseContext context) { for (SearchRequestOperationsListener listener : listeners) { try { listener.onPhaseStart(context); @@ -53,10 +59,10 @@ public void onPhaseStart(SearchPhaseContext context) { } @Override - public void onPhaseEnd(SearchPhaseContext context) { + void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { for (SearchRequestOperationsListener listener : listeners) { try { - listener.onPhaseEnd(context); + listener.onPhaseEnd(context, searchRequestContext); } catch (Exception e) { logger.warn(() -> new ParameterizedMessage("onPhaseEnd listener [{}] failed", listener), e); } @@ -64,7 +70,7 @@ public void onPhaseEnd(SearchPhaseContext context) { } @Override - public void onPhaseFailure(SearchPhaseContext context) { + void onPhaseFailure(SearchPhaseContext context) { for (SearchRequestOperationsListener listener : listeners) { try { listener.onPhaseFailure(context); @@ -73,5 +79,27 @@ public void onPhaseFailure(SearchPhaseContext context) { } } } + + @Override + void onRequestStart(SearchRequestContext searchRequestContext) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onRequestStart(searchRequestContext); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onRequestStart listener [{}] failed", listener), e); + } + } + } + + @Override + public void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onRequestEnd(context, searchRequestContext); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onRequestEnd listener [{}] failed", listener), e); + } + } + } } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java new file mode 100644 index 0000000000000..a55cfd463a78f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java @@ -0,0 +1,273 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.action.search; + +import com.fasterxml.jackson.core.io.JsonStringEncoder; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.logging.Loggers; +import org.opensearch.common.logging.OpenSearchLogMessage; +import org.opensearch.common.logging.SlowLogLevel; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.tasks.Task; + +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * The request-level search slow log implementation + * + * @opensearch.internal + */ +public final class SearchRequestSlowLog extends SearchRequestOperationsListener { + private static final Charset UTF_8 = StandardCharsets.UTF_8; + + private long warnThreshold; + private long infoThreshold; + private long debugThreshold; + private long traceThreshold; + private SlowLogLevel level; + + private final Logger logger; + + static final String CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX = "cluster.search.request.slowlog"; + + public static final Setting CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING = Setting.timeSetting( + CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".threshold.warn", + TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final Setting CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING = Setting.timeSetting( + CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".threshold.info", + TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final Setting CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING = Setting.timeSetting( + CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".threshold.debug", + TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final Setting CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING = Setting.timeSetting( + CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".threshold.trace", + TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final Setting CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL = new Setting<>( + CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".level", + SlowLogLevel.TRACE.name(), + SlowLogLevel::parse, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); + + public SearchRequestSlowLog(ClusterService clusterService) { + this(clusterService, LogManager.getLogger(CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX)); // logger configured in log4j2.properties + } + + SearchRequestSlowLog(ClusterService clusterService, Logger logger) { + this.logger = logger; + Loggers.setLevel(this.logger, SlowLogLevel.TRACE.name()); + + this.warnThreshold = clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING).nanos(); + this.infoThreshold = clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING).nanos(); + this.debugThreshold = 
clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING).nanos(); + this.traceThreshold = clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING).nanos(); + this.level = clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL); + + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING, this::setWarnThreshold); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING, this::setInfoThreshold); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING, this::setDebugThreshold); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING, this::setTraceThreshold); + clusterService.getClusterSettings().addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL, this::setLevel); + } + + @Override + void onPhaseStart(SearchPhaseContext context) {} + + @Override + void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + + @Override + void onPhaseFailure(SearchPhaseContext context) {} + + @Override + void onRequestStart(SearchRequestContext searchRequestContext) {} + + @Override + void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + long tookInNanos = System.nanoTime() - searchRequestContext.getAbsoluteStartNanos(); + + if (warnThreshold >= 0 && tookInNanos > warnThreshold && level.isLevelEnabledFor(SlowLogLevel.WARN)) { + logger.warn(new SearchRequestSlowLogMessage(context, tookInNanos, searchRequestContext)); + } else if (infoThreshold >= 0 && tookInNanos > infoThreshold && level.isLevelEnabledFor(SlowLogLevel.INFO)) { + logger.info(new SearchRequestSlowLogMessage(context, tookInNanos, searchRequestContext)); + } else if (debugThreshold >= 0 && tookInNanos > debugThreshold && level.isLevelEnabledFor(SlowLogLevel.DEBUG)) { + logger.debug(new SearchRequestSlowLogMessage(context, tookInNanos, searchRequestContext)); + } else if (traceThreshold >= 0 && tookInNanos > traceThreshold && level.isLevelEnabledFor(SlowLogLevel.TRACE)) { + logger.trace(new SearchRequestSlowLogMessage(context, tookInNanos, searchRequestContext)); + } + } + + /** + * Search request slow log message + * + * @opensearch.internal + */ + static final class SearchRequestSlowLogMessage extends OpenSearchLogMessage { + + SearchRequestSlowLogMessage(SearchPhaseContext context, long tookInNanos, SearchRequestContext searchRequestContext) { + super(prepareMap(context, tookInNanos, searchRequestContext), message(context, tookInNanos, searchRequestContext)); + } + + private static Map prepareMap( + SearchPhaseContext context, + long tookInNanos, + SearchRequestContext searchRequestContext + ) { + final Map messageFields = new HashMap<>(); + messageFields.put("took", TimeValue.timeValueNanos(tookInNanos)); + messageFields.put("took_millis", TimeUnit.NANOSECONDS.toMillis(tookInNanos)); + messageFields.put("phase_took", searchRequestContext.phaseTookMap().toString()); + if (searchRequestContext.totalHits() != null) { + messageFields.put("total_hits", searchRequestContext.totalHits()); + } else { + messageFields.put("total_hits", "-1"); + } + messageFields.put("search_type", context.getRequest().searchType()); + messageFields.put("shards", searchRequestContext.formattedShardStats()); + + if 
(context.getRequest().source() != null) { + String source = escapeJson(context.getRequest().source().toString(FORMAT_PARAMS)); + messageFields.put("source", source); + } else { + messageFields.put("source", "{}"); + } + + messageFields.put("id", context.getTask().getHeader(Task.X_OPAQUE_ID)); + return messageFields; + } + + // Message will be used in plaintext logs + private static String message(SearchPhaseContext context, long tookInNanos, SearchRequestContext searchRequestContext) { + final StringBuilder sb = new StringBuilder(); + sb.append("took[").append(TimeValue.timeValueNanos(tookInNanos)).append("], "); + sb.append("took_millis[").append(TimeUnit.NANOSECONDS.toMillis(tookInNanos)).append("], "); + sb.append("phase_took_millis[").append(searchRequestContext.phaseTookMap().toString()).append("], "); + if (searchRequestContext.totalHits() != null) { + sb.append("total_hits[").append(searchRequestContext.totalHits()).append("], "); + } else { + sb.append("total_hits[-1]"); + } + sb.append("search_type[").append(context.getRequest().searchType()).append("], "); + sb.append("shards[").append(searchRequestContext.formattedShardStats()).append("], "); + if (context.getRequest().source() != null) { + sb.append("source[").append(context.getRequest().source().toString(FORMAT_PARAMS)).append("], "); + } else { + sb.append("source[], "); + } + if (context.getTask().getHeader(Task.X_OPAQUE_ID) != null) { + sb.append("id[").append(context.getTask().getHeader(Task.X_OPAQUE_ID)).append("]"); + } else { + sb.append("id[]"); + } + return sb.toString(); + } + + private static String escapeJson(String text) { + byte[] sourceEscaped = JsonStringEncoder.getInstance().quoteAsUTF8(text); + return new String(sourceEscaped, UTF_8); + } + } + + void setWarnThreshold(TimeValue warnThreshold) { + this.warnThreshold = warnThreshold.nanos(); + } + + void setInfoThreshold(TimeValue infoThreshold) { + this.infoThreshold = infoThreshold.nanos(); + } + + void setDebugThreshold(TimeValue debugThreshold) { + this.debugThreshold = debugThreshold.nanos(); + } + + void setTraceThreshold(TimeValue traceThreshold) { + this.traceThreshold = traceThreshold.nanos(); + } + + void setLevel(SlowLogLevel level) { + this.level = level; + } + + protected long getWarnThreshold() { + return warnThreshold; + } + + protected long getInfoThreshold() { + return infoThreshold; + } + + protected long getDebugThreshold() { + return debugThreshold; + } + + protected long getTraceThreshold() { + return traceThreshold; + } + + SlowLogLevel getLevel() { + return level; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java index 6b7c94ec3037a..262750849eaa9 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -23,7 +23,7 @@ * @opensearch.api */ @PublicApi(since = "2.11.0") -public final class SearchRequestStats implements SearchRequestOperationsListener { +public final class SearchRequestStats extends SearchRequestOperationsListener { Map phaseStatsMap = new EnumMap<>(SearchPhaseName.class); @Inject @@ -46,12 +46,12 @@ public long getPhaseMetric(SearchPhaseName searchPhaseName) { } @Override - public void onPhaseStart(SearchPhaseContext context) { + void onPhaseStart(SearchPhaseContext context) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); } @Override - public void 
onPhaseEnd(SearchPhaseContext context) { + void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { StatsHolder phaseStats = phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()); phaseStats.current.dec(); phaseStats.total.inc(); @@ -59,7 +59,7 @@ public void onPhaseEnd(SearchPhaseContext context) { } @Override - public void onPhaseFailure(SearchPhaseContext context) { + void onPhaseFailure(SearchPhaseContext context) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java b/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java index fbd85a3fc0b8f..33d9bf70021a0 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.routing.PlainShardIterator; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.Countable; import org.opensearch.common.util.PlainIterator; @@ -54,8 +55,9 @@ * the cluster alias. * @see OriginalIndices * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SearchShardIterator implements Comparable, Countable { private final OriginalIndices originalIndices; diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java index 54faaf363cb70..dfecf4f462c4d 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java @@ -33,6 +33,7 @@ package org.opensearch.action.search; import org.opensearch.common.MemoizedSupplier; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.tasks.TaskId; import org.opensearch.search.fetch.ShardFetchSearchRequest; import org.opensearch.search.internal.ShardSearchRequest; @@ -46,8 +47,9 @@ * Task storing information about a currently running search shard request. * See {@link ShardSearchRequest}, {@link ShardFetchSearchRequest}, ... 
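The request-level slow log above combines two pieces introduced in this change: per-phase took times captured as System.nanoTime() deltas at onPhaseEnd, and a cascading threshold check at onRequestEnd that emits at most one log line at the most severe enabled level. A minimal self-contained sketch of that pattern, assuming plain long thresholds in nanoseconds (with -1 meaning disabled, matching the setting defaults) and System.out standing in for the slow-log logger; RequestSlowLogSketch is a hypothetical name, not the OpenSearch class:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

class RequestSlowLogSketch {
    private final Map<String, Long> phaseTookMillis = new HashMap<>();
    private long warnThresholdNanos = -1; // -1 disables the level, as in the setting defaults
    private long infoThresholdNanos = -1;

    void onPhaseEnd(String phaseName, long phaseStartNanos) {
        // Same measurement as AbstractSearchAsyncAction.onPhaseEnd: nanos since phase start, reported in millis
        phaseTookMillis.put(phaseName, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - phaseStartNanos));
    }

    void onRequestEnd(long requestStartNanos) {
        long tookNanos = System.nanoTime() - requestStartNanos;
        // Most severe level first, so a slow request is logged exactly once
        if (warnThresholdNanos >= 0 && tookNanos > warnThresholdNanos) {
            System.out.println("WARN took_millis[" + TimeUnit.NANOSECONDS.toMillis(tookNanos) + "], phase_took" + phaseTookMillis);
        } else if (infoThresholdNanos >= 0 && tookNanos > infoThresholdNanos) {
            System.out.println("INFO took_millis[" + TimeUnit.NANOSECONDS.toMillis(tookNanos) + "], phase_took" + phaseTookMillis);
        }
    }
}

Because all four thresholds default to -1, TransportSearchAction (further below in this diff) only attaches the slow-log listener when at least one threshold has been raised above the disabled default.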
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchShardTask extends CancellableTask implements SearchBackpressureTask { // generating metadata in a lazy way since source can be quite big private final MemoizedSupplier metadataSupplier; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java index a723937afd2ed..64c738f633f2e 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java @@ -45,6 +45,7 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchService; import org.opensearch.search.dfs.DfsSearchResult; @@ -542,6 +543,9 @@ public static void registerRequestHandler(TransportService transportService, Sea transportService.registerRequestHandler( DFS_ACTION_NAME, ThreadPool.Names.SAME, + false, + true, + AdmissionControlActionType.SEARCH, ShardSearchRequest::new, (request, channel, task) -> searchService.executeDfsPhase( request, @@ -556,6 +560,9 @@ public static void registerRequestHandler(TransportService transportService, Sea transportService.registerRequestHandler( QUERY_ACTION_NAME, ThreadPool.Names.SAME, + false, + true, + AdmissionControlActionType.SEARCH, ShardSearchRequest::new, (request, channel, task) -> { searchService.executeQueryPhase( @@ -575,6 +582,9 @@ public static void registerRequestHandler(TransportService transportService, Sea transportService.registerRequestHandler( QUERY_ID_ACTION_NAME, ThreadPool.Names.SAME, + false, + true, + AdmissionControlActionType.SEARCH, QuerySearchRequest::new, (request, channel, task) -> { searchService.executeQueryPhase( @@ -633,6 +643,7 @@ public static void registerRequestHandler(TransportService transportService, Sea ThreadPool.Names.SAME, true, true, + AdmissionControlActionType.SEARCH, ShardFetchSearchRequest::new, (request, channel, task) -> { searchService.executeFetchPhase( diff --git a/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java b/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java index 4d3f9b71a919c..ef490844db692 100644 --- a/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java +++ b/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java @@ -37,6 +37,7 @@ import org.opensearch.action.OriginalIndices; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -56,8 +57,9 @@ /** * Represents a failure to search on a specific shard. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardSearchFailure extends ShardOperationFailedException { private static final String REASON_FIELD = "reason"; diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 16b7e4810b130..05f4308df74fa 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -67,7 +67,6 @@ import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.indices.breaker.CircuitBreakerService; @@ -188,6 +187,7 @@ public class TransportSearchAction extends HandledTransportAction) SearchRequest::new); @@ -226,6 +227,7 @@ public TransportSearchAction( this.isRequestStatsEnabled = clusterService.getClusterSettings().get(SEARCH_REQUEST_STATS_ENABLED); clusterService.getClusterSettings().addSettingsUpdateConsumer(SEARCH_REQUEST_STATS_ENABLED, this::setIsRequestStatsEnabled); this.searchRequestStats = searchRequestStats; + this.searchRequestSlowLog = searchRequestSlowLog; this.metricsRegistry = metricsRegistry; this.searchQueryMetricsEnabled = clusterService.getClusterSettings().get(SEARCH_QUERY_METRICS_ENABLED_SETTING); clusterService.getClusterSettings() @@ -298,7 +300,7 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust * * @opensearch.internal */ - static final class SearchTimeProvider implements SearchRequestOperationsListener { + static final class SearchTimeProvider extends SearchRequestOperationsListener { private final long absoluteStartMillis; private final long relativeStartNanos; @@ -334,10 +336,6 @@ public void setPhaseTook(boolean phaseTook) { this.phaseTook = phaseTook; } - public boolean isPhaseTook() { - return phaseTook; - } - SearchResponse.PhaseTook getPhaseTook() { if (phaseTook) { Map phaseTookMap = new HashMap<>(); @@ -354,10 +352,10 @@ SearchResponse.PhaseTook getPhaseTook() { Map phaseStatsMap = new EnumMap<>(SearchPhaseName.class); @Override - public void onPhaseStart(SearchPhaseContext context) {} + void onPhaseStart(SearchPhaseContext context) {} @Override - public void onPhaseEnd(SearchPhaseContext context) { + void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { phaseStatsMap.put( context.getCurrentPhase().getSearchPhaseName(), TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - context.getCurrentPhase().getStartTimeInNanos()) @@ -365,7 +363,7 @@ public void onPhaseEnd(SearchPhaseContext context) { } @Override - public void onPhaseFailure(SearchPhaseContext context) {} + void onPhaseFailure(SearchPhaseContext context) {} public Long getPhaseTookTime(SearchPhaseName searchPhaseName) { return phaseStatsMap.get(searchPhaseName); @@ -428,7 +426,7 @@ public AbstractSearchAsyncAction asyncSearchAction( boolean preFilter, ThreadPool threadPool, SearchResponse.Clusters clusters, - SearchRequestOperationsListener searchRequestOperationsListener + SearchRequestContext searchRequestContext ) { return new AbstractSearchAsyncAction( actionName, @@ -448,7 +446,7 @@ public AbstractSearchAsyncAction asyncSearchAction( new 
ArraySearchPhaseResults<>(shardsIts.size()), searchRequest.getMaxConcurrentShardRequests(), clusters, - searchRequestOperationsListener + searchRequestContext ) { @Override protected void executePhaseOnShard( @@ -494,13 +492,10 @@ private void executeRequest( ); final List searchListenersList = createSearchListenerList(originalSearchRequest, timeProvider); - - final SearchRequestOperationsListener searchRequestOperationsListener; - if (!CollectionUtils.isEmpty(searchListenersList)) { - searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); - } else { - searchRequestOperationsListener = null; - } + SearchRequestContext searchRequestContext = new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger) + ); + searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext); PipelinedRequest searchRequest; ActionListener listener; @@ -527,7 +522,7 @@ private void executeRequest( timeProvider, searchAsyncActionProvider, listener, - searchRequestOperationsListener + searchRequestContext ); if (sr.source() == null) { rewriteListener.onResponse(sr.source()); @@ -548,7 +543,7 @@ private ActionListener buildRewriteListener( SearchTimeProvider timeProvider, SearchAsyncActionProvider searchAsyncActionProvider, ActionListener listener, - SearchRequestOperationsListener searchRequestOperationsListener + SearchRequestContext searchRequestContext ) { return ActionListener.wrap(source -> { if (source != searchRequest.source()) { @@ -581,7 +576,7 @@ private ActionListener buildRewriteListener( listener, searchContext, searchAsyncActionProvider, - searchRequestOperationsListener + searchRequestContext ); } else { if (shouldMinimizeRoundtrips(searchRequest)) { @@ -603,7 +598,7 @@ private ActionListener buildRewriteListener( l, searchContext, searchAsyncActionProvider, - searchRequestOperationsListener + searchRequestContext ) ); } else { @@ -654,7 +649,7 @@ private ActionListener buildRewriteListener( new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters.get()), searchContext, searchAsyncActionProvider, - searchRequestOperationsListener + searchRequestContext ); }, listener::onFailure) ); @@ -925,7 +920,7 @@ private void executeLocalSearch( ActionListener listener, SearchContextId searchContext, SearchAsyncActionProvider searchAsyncActionProvider, - SearchRequestOperationsListener searchRequestOperationsListener + SearchRequestContext searchRequestContext ) { executeSearch( (SearchTask) task, @@ -940,7 +935,7 @@ private void executeLocalSearch( SearchResponse.Clusters.EMPTY, searchContext, searchAsyncActionProvider, - searchRequestOperationsListener + searchRequestContext ); } @@ -1059,7 +1054,7 @@ private void executeSearch( SearchResponse.Clusters clusters, @Nullable SearchContextId searchContext, SearchAsyncActionProvider searchAsyncActionProvider, - SearchRequestOperationsListener searchRequestOperationsListener + SearchRequestContext searchRequestContext ) { clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name @@ -1161,7 +1156,7 @@ private void executeSearch( preFilterSearchShards, threadPool, clusters, - searchRequestOperationsListener + searchRequestContext ).start(); } @@ -1245,7 +1240,7 @@ AbstractSearchAsyncAction asyncSearchAction( boolean preFilter, ThreadPool threadPool, SearchResponse.Clusters 
clusters, - SearchRequestOperationsListener searchRequestOperationsListener + SearchRequestContext searchRequestContext ); } @@ -1268,6 +1263,13 @@ private List createSearchListenerList(SearchReq searchListenersList.add(timeProvider); } + if (searchRequestSlowLog.getWarnThreshold() >= 0 + || searchRequestSlowLog.getInfoThreshold() >= 0 + || searchRequestSlowLog.getDebugThreshold() >= 0 + || searchRequestSlowLog.getTraceThreshold() >= 0) { + searchListenersList.add(searchRequestSlowLog); + } + return searchListenersList; } @@ -1286,7 +1288,7 @@ private AbstractSearchAsyncAction searchAsyncAction boolean preFilter, ThreadPool threadPool, SearchResponse.Clusters clusters, - SearchRequestOperationsListener searchRequestOperationsListener + SearchRequestContext searchRequestContext ) { if (preFilter) { return new CanMatchPreFilterSearchPhase( @@ -1319,7 +1321,7 @@ private AbstractSearchAsyncAction searchAsyncAction false, threadPool, clusters, - searchRequestOperationsListener + searchRequestContext ); return new SearchPhase(action.getName()) { @Override @@ -1329,7 +1331,7 @@ public void run() { }; }, clusters, - searchRequestOperationsListener + searchRequestContext ); } else { final QueryPhaseResultConsumer queryResultConsumer = searchPhaseController.newSearchPhaseResults( @@ -1360,7 +1362,7 @@ public void run() { clusterState, task, clusters, - searchRequestOperationsListener + searchRequestContext ); break; case QUERY_THEN_FETCH: @@ -1381,7 +1383,7 @@ public void run() { clusterState, task, clusters, - searchRequestOperationsListener + searchRequestContext ); break; default: diff --git a/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java b/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java index 15275ba48fc6e..e91342a7ce4b8 100644 --- a/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java +++ b/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -48,8 +49,9 @@ * A class whose instances represent a value for counting the number * of active shard copies for a given shard in an index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ActiveShardCount implements Writeable { private static final int ACTIVE_SHARD_COUNT_DEFAULT = -2; diff --git a/server/src/main/java/org/opensearch/action/support/RetryableAction.java b/server/src/main/java/org/opensearch/action/support/RetryableAction.java index fc2ee277b538a..d555a876f20fb 100644 --- a/server/src/main/java/org/opensearch/action/support/RetryableAction.java +++ b/server/src/main/java/org/opensearch/action/support/RetryableAction.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionRunnable; import org.opensearch.action.bulk.BackoffPolicy; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; @@ -52,8 +53,9 @@ * default. 
The action will be retried with exponentially increasing delay periods until the timeout period * has been reached. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class RetryableAction { private final Logger logger; diff --git a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java index c52bc6c2c799d..bb6bfc6fd4773 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java +++ b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java @@ -33,6 +33,7 @@ package org.opensearch.action.support.replication; import org.opensearch.action.support.RetryableAction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.index.shard.ShardId; @@ -51,8 +52,9 @@ /** * Pending Replication Actions * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PendingReplicationActions implements Consumer, Releasable { private final Map>> onGoingReplicationActions = ConcurrentCollections.newConcurrentMap(); diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java index f9b85cc4bd7aa..dd3a38c1398ab 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java @@ -8,11 +8,14 @@ package org.opensearch.action.support.replication; +import org.opensearch.common.annotation.PublicApi; + /** * The type of replication used for inter-node replication. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public enum ReplicationMode { /** * In this mode, a {@code TransportReplicationAction} is fanned out to underlying concerned shard and is replicated logically. diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java index 825405cb68549..67480976c500f 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java @@ -36,6 +36,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; @@ -57,8 +58,9 @@ /** * Base class for write action responses. 
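RetryableAction, promoted to @opensearch.api here, retries with exponentially increasing delays until its timeout elapses, per the javadoc above. A rough sketch of such a schedule; the doubling base, the helper name, and the millisecond units are illustrative assumptions, not the class's actual internals:

import java.util.ArrayList;
import java.util.List;

final class BackoffScheduleSketch {
    // Collect delays that double each attempt until the next retry would overrun the timeout
    static List<Long> delaysMillis(long initialDelayMillis, long timeoutMillis) {
        List<Long> delays = new ArrayList<>();
        long elapsed = 0;
        long delay = initialDelayMillis;
        while (elapsed + delay <= timeoutMillis) {
            delays.add(delay);
            elapsed += delay;
            delay *= 2;
        }
        return delays;
    }

    public static void main(String[] args) {
        System.out.println(delaysMillis(50, 5_000)); // prints [50, 100, 200, 400, 800, 1600]
    }
}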
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ReplicationResponse extends ActionResponse { public static final ReplicationResponse.ShardInfo.Failure[] EMPTY = new ReplicationResponse.ShardInfo.Failure[0]; @@ -88,8 +90,9 @@ public void setShardInfo(ShardInfo shardInfo) { /** * Holds shard information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ShardInfo implements Writeable, ToXContentObject { private static final String TOTAL = "total"; @@ -235,8 +238,9 @@ public String toString() { /** * Holds failure information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure extends ShardOperationFailedException implements ToXContentObject { private static final String _INDEX = "_index"; diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index ddebdc5530e70..95f998e2d89c2 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -82,6 +82,7 @@ import org.opensearch.indices.IndexClosedException; import org.opensearch.indices.IndicesService; import org.opensearch.node.NodeClosedException; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ConnectTransportException; @@ -201,6 +202,40 @@ protected TransportReplicationAction( String executor, boolean syncGlobalCheckpointAfterOperation, boolean forceExecutionOnPrimary + ) { + this( + settings, + actionName, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + requestReader, + replicaRequestReader, + executor, + syncGlobalCheckpointAfterOperation, + forceExecutionOnPrimary, + null + ); + } + + protected TransportReplicationAction( + Settings settings, + String actionName, + TransportService transportService, + ClusterService clusterService, + IndicesService indicesService, + ThreadPool threadPool, + ShardStateAction shardStateAction, + ActionFilters actionFilters, + Writeable.Reader requestReader, + Writeable.Reader replicaRequestReader, + String executor, + boolean syncGlobalCheckpointAfterOperation, + boolean forceExecutionOnPrimary, + AdmissionControlActionType admissionControlActionType ) { super(actionName, actionFilters, transportService.getTaskManager()); this.threadPool = threadPool; @@ -219,14 +254,8 @@ protected TransportReplicationAction( transportService.registerRequestHandler(actionName, ThreadPool.Names.SAME, requestReader, this::handleOperationRequest); - transportService.registerRequestHandler( - transportPrimaryAction, - executor, - forceExecutionOnPrimary, - true, - in -> new ConcreteShardRequest<>(requestReader, in), - this::handlePrimaryRequest - ); + // This method will register Primary Request Handler Based on AdmissionControlActionType + registerPrimaryRequestHandler(requestReader, admissionControlActionType); // we must never reject on because of thread pool capacity on replicas transportService.registerRequestHandler( @@ -247,6 +276,38 @@ protected TransportReplicationAction( clusterSettings.addSettingsUpdateConsumer(REPLICATION_RETRY_TIMEOUT, (v) -> retryTimeout = v); } + 
/** + * This method registers the primary request handler; when an admissionControlActionType is provided, the + * admission control handler is invoked for the registered action + * @param requestReader instance of the request reader + * @param admissionControlActionType the admission control action type, or null to register without admission control + */ + private void registerPrimaryRequestHandler( + Writeable.Reader requestReader, + AdmissionControlActionType admissionControlActionType + ) { + if (admissionControlActionType != null) { + transportService.registerRequestHandler( + transportPrimaryAction, + executor, + forceExecutionOnPrimary, + true, + admissionControlActionType, + in -> new ConcreteShardRequest<>(requestReader, in), + this::handlePrimaryRequest + ); + } else { + transportService.registerRequestHandler( + transportPrimaryAction, + executor, + forceExecutionOnPrimary, + true, + in -> new ConcreteShardRequest<>(requestReader, in), + this::handlePrimaryRequest + ); + } + } + @Override protected void doExecute(Task task, Request request, ActionListener listener) { assert request.shardId() != null : "request shardId must be set"; diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java index 9ebfa8cfd0df8..27f9e6dee83de 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java @@ -59,6 +59,7 @@ import org.opensearch.index.translog.Translog.Location; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.telemetry.tracing.Span; import org.opensearch.telemetry.tracing.SpanBuilder; import org.opensearch.telemetry.tracing.SpanScope; @@ -104,7 +105,8 @@ protected TransportWriteAction( boolean forceExecutionOnPrimary, IndexingPressureService indexingPressureService, SystemIndices systemIndices, - Tracer tracer + Tracer tracer, + AdmissionControlActionType admissionControlActionType ) { // We pass ThreadPool.Names.SAME to the super class as we control the dispatching to the // ThreadPool.Names.WRITE/ThreadPool.Names.SYSTEM_WRITE thread pools in this class.
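The null check in registerPrimaryRequestHandler above is what keeps the change source compatible: the pre-existing constructor forwards null and yields the plain handler registration, while subclasses that pass an AdmissionControlActionType get the admission-control-aware overload. The same dispatch-on-optional-feature shape in isolation; all names below are illustrative, not the transport API:

// Sketch of register-with-optional-feature: the old entry point forwards null,
// and registration branches on whether the feature type is present.
final class RegistrationSketch {
    interface Registry {
        void register(String action, boolean withAdmissionControl);
    }

    private final Registry registry;

    RegistrationSketch(Registry registry) {
        this.registry = registry;
    }

    void registerHandler(String action) {
        registerHandler(action, null); // legacy callers keep compiling unchanged
    }

    void registerHandler(String action, String admissionControlType) {
        registry.register(action, admissionControlType != null);
    }
}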
@@ -121,7 +123,8 @@ protected TransportWriteAction( replicaRequest, ThreadPool.Names.SAME, true, - forceExecutionOnPrimary + forceExecutionOnPrimary, + admissionControlActionType ); this.executorFunction = executorFunction; this.indexingPressureService = indexingPressureService; @@ -129,6 +132,43 @@ protected TransportWriteAction( this.tracer = tracer; } + protected TransportWriteAction( + Settings settings, + String actionName, + TransportService transportService, + ClusterService clusterService, + IndicesService indicesService, + ThreadPool threadPool, + ShardStateAction shardStateAction, + ActionFilters actionFilters, + Writeable.Reader request, + Writeable.Reader replicaRequest, + Function executorFunction, + boolean forceExecutionOnPrimary, + IndexingPressureService indexingPressureService, + SystemIndices systemIndices, + Tracer tracer + ) { + this( + settings, + actionName, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + request, + replicaRequest, + executorFunction, + forceExecutionOnPrimary, + indexingPressureService, + systemIndices, + tracer, + null + ); + } + protected String executor(IndexShard shard) { return executorFunction.apply(shard); } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterState.java b/server/src/main/java/org/opensearch/cluster/ClusterState.java index 1b87a60c2ccf5..9e63f961d241d 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterState.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterState.java @@ -49,6 +49,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; @@ -97,8 +98,9 @@ * throws the {@link IncompatibleClusterStateVersionException}, which causes the publishing mechanism to send * a full version of the cluster state to the node on which this exception was thrown. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterState implements ToXContentFragment, Diffable { public static final ClusterState EMPTY_STATE = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build(); @@ -139,8 +141,9 @@ static boolean shouldSerializ /** * Custom cluster state. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Custom extends NamedDiffable, ToXContentFragment, FeatureAware { /** @@ -596,8 +599,9 @@ public static Builder builder(ClusterState state) { /** * Builder for cluster state. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final ClusterName clusterName; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java b/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java index 140e6426bb801..d7702729ae884 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java @@ -33,13 +33,15 @@ package org.opensearch.cluster; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.PublicApi; /** * A component that is in charge of applying an incoming cluster state to the node internal data structures. 
* The single apply method is called before the cluster state becomes visible via {@link ClusterService#state()}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateApplier { /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java index 01a8e51a3d13e..57fcfcad099ed 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java @@ -32,11 +32,14 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; + /** * A listener to be notified when a cluster state changes. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java index 9a4b708548a7d..149d93a158007 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java @@ -33,13 +33,15 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Cluster state update task configuration for timeout and priority * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateTaskConfig { /** * The timeout for this cluster state update task configuration. If diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java index bf8494cc36857..115816798959e 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java @@ -33,6 +33,7 @@ import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import java.util.IdentityHashMap; import java.util.List; @@ -41,8 +42,9 @@ /** * Interface that updates the cluster state based on the task * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateTaskExecutor { /** * Update the cluster state based on the current state and the given tasks. Return the *same instance* if no state @@ -103,8 +105,9 @@ default ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey * Represents the result of a batched execution of cluster state update tasks * @param the type of the cluster state update task * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") class ClusterTasksResult { @Nullable public final ClusterState resultingState; @@ -127,8 +130,9 @@ public static Builder builder() { /** * Builder for cluster state task. 
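ClusterStateTaskExecutor and its nested ClusterTasksResult are now part of the public surface; the executor contract is to fold a whole batch of tasks into one resulting state and record a per-task outcome. A hedged sketch of a no-op executor; the builder calls follow the pattern used by in-tree executors, but verify the exact signatures against your target version:

import java.util.List;

import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.ClusterStateTaskExecutor;

final class NoopTaskExecutor implements ClusterStateTaskExecutor<String> {
    @Override
    public ClusterTasksResult<String> execute(ClusterState currentState, List<String> tasks) throws Exception {
        // Returning the *same instance* signals that no new cluster state needs publishing
        return ClusterTasksResult.<String>builder().successes(tasks).build(currentState);
    }
}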
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final Map executionResults = new IdentityHashMap<>(); diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java index d6c4abfad7b8d..0cb24bd3f3eab 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java @@ -32,14 +32,16 @@ package org.opensearch.cluster; import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.common.annotation.PublicApi; import java.util.List; /** * Interface to implement a cluster state change listener * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateTaskListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java index 9225914a931b2..ae6626dd4785d 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java @@ -34,6 +34,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import java.util.List; @@ -41,8 +42,9 @@ /** * A task that can update the cluster state. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig, diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java index c07dcc5daaee6..0b2ed7ef66d43 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java @@ -31,12 +31,15 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; + /** * Enables listening to cluster-manager changes events of the local node (when the local node becomes the cluster-manager, and when the local * node cease being a cluster-manager). * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface LocalNodeClusterManagerListener extends ClusterStateListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java index 31c0b294b8004..2487aaf0d7c51 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java @@ -31,14 +31,17 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.DeprecatedApi; + /** * Enables listening to cluster-manager changes events of the local node (when the local node becomes the cluster-manager, and when the local * node cease being a cluster-manager). 
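With ClusterStateUpdateTask likewise annotated @PublicApi, a single-task update is the simplest entry point for plugin authors. A minimal sketch (signatures as on the 2.x line; the task name and no-op body are illustrative):

import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.ClusterStateUpdateTask;

final class TouchStateTask extends ClusterStateUpdateTask {
    @Override
    public ClusterState execute(ClusterState currentState) throws Exception {
        // A real task would return ClusterState.builder(currentState)...build()
        return currentState; // same instance: nothing to publish
    }

    @Override
    public void onFailure(String source, Exception e) {
        // invoked if the update cannot be applied
    }
}

A task like this would be submitted through ClusterService, e.g. clusterService.submitStateUpdateTask("touch-state", new TouchStateTask()).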
* - * @opensearch.internal + * @opensearch.api * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link LocalNodeClusterManagerListener} */ @Deprecated +@DeprecatedApi(since = "2.2.0") public interface LocalNodeMasterListener extends LocalNodeClusterManagerListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/NamedDiff.java b/server/src/main/java/org/opensearch/cluster/NamedDiff.java index ce971aa723394..e994cfb224386 100644 --- a/server/src/main/java/org/opensearch/cluster/NamedDiff.java +++ b/server/src/main/java/org/opensearch/cluster/NamedDiff.java @@ -33,13 +33,15 @@ package org.opensearch.cluster; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.NamedWriteable; /** * Diff that also support NamedWriteable interface * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface NamedDiff> extends Diff, NamedWriteable { /** * The minimal version of the recipient this custom object can be sent to diff --git a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java index 5b7bb19a935b1..1c12c260b3929 100644 --- a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java +++ b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java @@ -43,6 +43,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Setting; @@ -81,8 +82,9 @@ * This component does not block on disconnections at all, because a disconnection might need to wait for an ongoing (background) connection * attempt to complete first. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodeConnectionsService extends AbstractLifecycleComponent { private static final Logger logger = LogManager.getLogger(NodeConnectionsService.class); diff --git a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java index 3e0c78099e6b4..769f97373f7b7 100644 --- a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState.Custom; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -54,8 +55,9 @@ /** * Meta data about restore processes that are currently executing * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestoreInProgress extends AbstractNamedDiffable implements Custom, Iterable { /** @@ -139,8 +141,9 @@ public RestoreInProgress build() { /** * Restore metadata * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Entry { private final String uuid; private final State state; @@ -238,8 +241,9 @@ public int hashCode() { /** * Represents status of a restored shard * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ShardRestoreStatus implements Writeable { private State state; private String nodeId; @@ -363,8 +367,9 @@ public int hashCode() { /** * Shard restore process state * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum State { /** * Initializing state diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index 952591f346536..3de23d2490c63 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState.Custom; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -928,8 +929,9 @@ public String toString() { /** * State of the snapshots. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum State { INIT((byte) 0, false), STARTED((byte) 1, false), diff --git a/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java b/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java index eb31fa2b7e69d..f0fa5af64d157 100644 --- a/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java +++ b/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java @@ -32,13 +32,15 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * An exception to cluster state listener that allows for timeouts and for post added notifications. 
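The listener interfaces promoted in this stretch (ClusterStateListener, TimeoutClusterStateListener, LocalNodeClusterManagerListener) all hang off the single clusterChanged callback. A minimal implementation, with registration via ClusterService shown as an assumed usage in the trailing comment:

import org.opensearch.cluster.ClusterChangedEvent;
import org.opensearch.cluster.ClusterStateListener;

final class NodeChangeLogger implements ClusterStateListener {
    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        // React only when cluster membership actually changed
        if (event.nodesChanged()) {
            System.out.println("nodes changed at cluster state version " + event.state().version());
        }
    }
}
// Registered once at startup: clusterService.addListener(new NodeChangeLogger());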
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TimeoutClusterStateListener extends ClusterStateListener { void postAdded(); diff --git a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java index db26496c6f263..837bf8af449ad 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java +++ b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java @@ -32,11 +32,14 @@ package org.opensearch.cluster.ack; +import org.opensearch.common.annotation.PublicApi; + /** * Base response returned after a cluster state update * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterStateUpdateResponse { private final boolean acknowledged; diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java index 4673f075e8439..ea6c0c69c523d 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.block; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.rest.RestStatus; @@ -49,8 +50,9 @@ /** * Internal exception on obtaining a cluster block * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterBlockException extends OpenSearchException { private final Set blocks; diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java index bcb4f7f71268e..304136166d515 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MetadataIndexStateService; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -59,8 +60,9 @@ /** * Represents current cluster level blocks to block dirty operations done against the cluster. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterBlocks extends AbstractDiffable { public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(emptySet(), Map.of()); @@ -355,8 +357,9 @@ public static Builder builder() { /** * Builder for cluster blocks. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final Set global = new HashSet<>(); diff --git a/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java b/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java index 729d76c72e99e..3e9fa3e23a12f 100644 --- a/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java +++ b/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.block; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -16,8 +17,9 @@ /** * Internal exception on obtaining an index create block * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexCreateBlockException extends ClusterBlockException { public IndexCreateBlockException(Set globalLevelBlocks) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java index 3a506397bcac8..0ef8262a216ee 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java @@ -34,14 +34,16 @@ import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; /** * Publishes the cluster state * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStatePublisher { /** * Publish all the changes to the cluster from the cluster-manager (can be called just by the cluster-manager). The publish @@ -59,8 +61,9 @@ public interface ClusterStatePublisher { /** * An acknowledgement listener. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface AckListener { /** * Should be called when the cluster coordination layer has committed the cluster state (i.e. even if this publication fails, diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java index 869c86ae75b90..53398d6f3f98f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.coordination; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.set.Sets; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; @@ -55,8 +56,9 @@ /** * Metadata for cluster coordination * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CoordinationMetadata implements Writeable, ToXContentFragment { public static final CoordinationMetadata EMPTY_METADATA = builder().build(); @@ -214,8 +216,9 @@ public String toString() { /** * Builder for coordination metadata. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private long term = 0; private VotingConfiguration lastCommittedConfiguration = VotingConfiguration.EMPTY_CONFIG; @@ -266,8 +269,9 @@ public CoordinationMetadata build() { /** * Excluded nodes from voting config. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class VotingConfigExclusion implements Writeable, ToXContentFragment { public static final String MISSING_VALUE_MARKER = "_absent_"; private final String nodeId; @@ -362,8 +366,9 @@ public String toString() { /** * A collection of persistent node ids, denoting the voting configuration for cluster state changes. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class VotingConfiguration implements Writeable, ToXContentFragment { public static final VotingConfiguration EMPTY_CONFIG = new VotingConfiguration(Collections.emptySet()); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index a4ffab7fb70c9..5a07f964f94a4 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -261,9 +261,10 @@ public Coordinator( this::handlePublishRequest, this::handleApplyCommit ); - this.leaderChecker = new LeaderChecker(settings, transportService, this::onLeaderFailure, nodeHealthService); + this.leaderChecker = new LeaderChecker(settings, clusterSettings, transportService, this::onLeaderFailure, nodeHealthService); this.followersChecker = new FollowersChecker( settings, + clusterSettings, transportService, this::onFollowerCheckRequest, this::removeNode, diff --git a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java index f69a4f771cf21..70bb0515bb022 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.coordination.Coordinator.Mode; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -98,7 +99,9 @@ public class FollowersChecker { "cluster.fault_detection.follower_check.timeout", TimeValue.timeValueMillis(10000), TimeValue.timeValueMillis(1), - Setting.Property.NodeScope + TimeValue.timeValueMillis(60000), + Setting.Property.NodeScope, + Setting.Property.Dynamic ); // the number of failed checks that must happen before the follower is considered to have failed. 
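The timeout setting above gains an upper bound and the Dynamic property; the matching update hook appears in the constructor hunks below. The general pattern, sketched with a hypothetical setting key and owner class rather than the actual FollowersChecker:

```java
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;

public class CheckerSketch {
    // Bounded, dynamically updatable time setting: 10s default, 1ms floor,
    // 60s ceiling, mirroring the fault-detection timeouts in this change.
    public static final Setting<TimeValue> CHECK_TIMEOUT_SETTING = Setting.timeSetting(
        "example.fault_detection.check.timeout", // hypothetical key
        TimeValue.timeValueMillis(10000),
        TimeValue.timeValueMillis(1),
        TimeValue.timeValueMillis(60000),
        Setting.Property.NodeScope,
        Setting.Property.Dynamic
    );

    private volatile TimeValue checkTimeout;

    public CheckerSketch(Settings settings, ClusterSettings clusterSettings) {
        this.checkTimeout = CHECK_TIMEOUT_SETTING.get(settings);
        // Invoked whenever an operator changes the dynamic setting at runtime.
        clusterSettings.addSettingsUpdateConsumer(CHECK_TIMEOUT_SETTING, t -> this.checkTimeout = t);
    }
}
```

The field in the sketch is volatile because the consumer runs on a different thread than readers; the PR itself uses a plain mutable field with the same consumer wiring.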
@@ -112,7 +115,7 @@ public class FollowersChecker { private final Settings settings; private final TimeValue followerCheckInterval; - private final TimeValue followerCheckTimeout; + private TimeValue followerCheckTimeout; private final int followerCheckRetryCount; private final BiConsumer onNodeFailure; private final Consumer handleRequestAndUpdateState; @@ -127,6 +130,7 @@ public class FollowersChecker { public FollowersChecker( Settings settings, + ClusterSettings clusterSettings, TransportService transportService, Consumer handleRequestAndUpdateState, BiConsumer onNodeFailure, @@ -141,7 +145,7 @@ public FollowersChecker( followerCheckInterval = FOLLOWER_CHECK_INTERVAL_SETTING.get(settings); followerCheckTimeout = FOLLOWER_CHECK_TIMEOUT_SETTING.get(settings); followerCheckRetryCount = FOLLOWER_CHECK_RETRY_COUNT_SETTING.get(settings); - + clusterSettings.addSettingsUpdateConsumer(FOLLOWER_CHECK_TIMEOUT_SETTING, this::setFollowerCheckTimeout); updateFastResponseState(0, Mode.CANDIDATE); transportService.registerRequestHandler( FOLLOWER_CHECK_ACTION_NAME, @@ -159,6 +163,10 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti }); } + private void setFollowerCheckTimeout(TimeValue followerCheckTimeout) { + this.followerCheckTimeout = followerCheckTimeout; + } + /** * Update the set of known nodes, starting to check any new ones and stopping checking any previously-known-but-now-unknown ones. */ diff --git a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java index 69ba1f977f326..8d4373b865f62 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Nullable; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -97,7 +98,9 @@ public class LeaderChecker { "cluster.fault_detection.leader_check.timeout", TimeValue.timeValueMillis(10000), TimeValue.timeValueMillis(1), - Setting.Property.NodeScope + TimeValue.timeValueMillis(60000), + Setting.Property.NodeScope, + Setting.Property.Dynamic ); // the number of failed checks that must happen before the leader is considered to have failed. 
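Once both checks are dynamic, operators can tune fault detection without a rolling restart. A hedged sketch using the transport Client (the REST equivalent is a PUT to the _cluster/settings endpoint); values outside the new [1ms, 60s] bounds should be rejected by the bounded setting:

```java
import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.opensearch.client.Client;
import org.opensearch.common.settings.Settings;

public final class FaultDetectionTuning {
    // Persistently raise both fault-detection timeouts to 30 seconds.
    public static void relaxTimeouts(Client client) {
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest().persistentSettings(
            Settings.builder()
                .put("cluster.fault_detection.follower_check.timeout", "30s")
                .put("cluster.fault_detection.leader_check.timeout", "30s")
        );
        client.admin().cluster().updateSettings(request).actionGet();
    }
}
```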
@@ -111,7 +114,7 @@ public class LeaderChecker { private final Settings settings; private final TimeValue leaderCheckInterval; - private final TimeValue leaderCheckTimeout; + private TimeValue leaderCheckTimeout; private final int leaderCheckRetryCount; private final TransportService transportService; private final Consumer onLeaderFailure; @@ -123,6 +126,7 @@ public class LeaderChecker { LeaderChecker( final Settings settings, + final ClusterSettings clusterSettings, final TransportService transportService, final Consumer onLeaderFailure, NodeHealthService nodeHealthService @@ -134,6 +138,7 @@ public class LeaderChecker { this.transportService = transportService; this.onLeaderFailure = onLeaderFailure; this.nodeHealthService = nodeHealthService; + clusterSettings.addSettingsUpdateConsumer(LEADER_CHECK_TIMEOUT_SETTING, this::setLeaderCheckTimeout); transportService.registerRequestHandler( LEADER_CHECK_ACTION_NAME, @@ -155,6 +160,10 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti }); } + private void setLeaderCheckTimeout(TimeValue leaderCheckTimeout) { + this.leaderCheckTimeout = leaderCheckTimeout; + } + public DiscoveryNode leader() { CheckScheduler checkScheduler = currentChecker.get(); return checkScheduler == null ? null : checkScheduler.leader; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java index 4d466c4b3ad73..0b7ed4fee5775 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.coordination; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -22,8 +23,9 @@ /** * Persisted cluster state related stats. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.12.0") public class PersistedStateStats implements Writeable, ToXContentObject { private final String statsName; private AtomicLong totalTimeInMillis = new AtomicLong(0); diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java index 3df807a4f94d3..254d70b0422d7 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java @@ -14,6 +14,7 @@ import org.opensearch.cluster.NamedDiff; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.Metadata.Custom; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -30,8 +31,9 @@ /** * Contains metadata about decommission attribute * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DecommissionAttributeMetadata extends AbstractNamedDiffable implements Custom { public static final String TYPE = "decommissionedAttribute"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java index cab1b75afe3e1..abc3712ee07e3 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -55,8 +56,9 @@ * contains a field "foo", it's expected to contain all the necessary settings/mappings/etc for the * "foo" field. These component templates make up the individual pieces composing an index template. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ComponentTemplate extends AbstractDiffable implements ToXContentObject { private static final ParseField TEMPLATE = new ParseField("template"); private static final ParseField VERSION = new ParseField("version"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java index 5f1291f6c6d82..e7f1b97f28842 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.Diff; import org.opensearch.cluster.metadata.DataStream.TimestampField; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -63,8 +64,9 @@ * ids corresponding to component templates that should be composed in order when creating a new * index. 
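Both template classes are now annotated as public API, so plugin code can construct them directly. A sketch of the composition model the javadoc above describes, assuming the (template, version, metadata) constructor of ComponentTemplate and the (patterns, template, composedOf, priority, version, metadata) constructor of ComposableIndexTemplate:

```java
import java.util.List;
import java.util.Map;

import org.opensearch.cluster.metadata.ComponentTemplate;
import org.opensearch.cluster.metadata.ComposableIndexTemplate;
import org.opensearch.cluster.metadata.Template;
import org.opensearch.common.settings.Settings;

public final class TemplateSketch {
    public static ComposableIndexTemplate logsTemplate() {
        // A settings-only building block (no mappings or aliases).
        Template settingsBlock = new Template(Settings.builder().put("index.number_of_replicas", 2).build(), null, null);

        // The block would be registered under a name such as "my-settings"
        // via the component template API before the index template refers to it.
        ComponentTemplate component = new ComponentTemplate(settingsBlock, 1L, Map.of());

        // An index template that composes the named block for matching indices;
        // "my-settings" is a hypothetical component template name.
        return new ComposableIndexTemplate(List.of("logs-*"), null, List.of("my-settings"), 100L, 1L, Map.of());
    }
}
```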
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ComposableIndexTemplate extends AbstractDiffable implements ToXContentObject { private static final ParseField INDEX_PATTERNS = new ParseField("index_patterns"); private static final ParseField TEMPLATE = new ParseField("template"); @@ -284,8 +286,9 @@ public String toString() { /** * Template for data stream. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class DataStreamTemplate implements Writeable, ToXContentObject { private static final ParseField TIMESTAMP_FIELD_FIELD = new ParseField("timestamp_field"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java b/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java index 8209c7bb56ad8..a8102182576ff 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.Diff; import org.opensearch.cluster.Diffable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,8 +51,9 @@ * This is a {@code Map} that implements AbstractDiffable so it * can be used for cluster state purposes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DiffableStringMap extends AbstractMap implements Diffable { public static final DiffableStringMap EMPTY = new DiffableStringMap(Collections.emptyMap()); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java index 0c316373e484f..0f3ee894a7f63 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java @@ -33,6 +33,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.core.common.Strings; @@ -52,8 +53,9 @@ * An index abstraction has a unique name and encapsulates all the {@link IndexMetadata} instances it is pointing to. * Also depending on type it may refer to a single or many concrete indices and may or may not have a write index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndexAbstraction { /** @@ -102,7 +104,10 @@ public interface IndexAbstraction { /** * An index abstraction type. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") enum Type { /** @@ -335,8 +340,9 @@ private boolean isNonEmpty(List idxMetas) { /** * A data stream. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") class DataStream implements IndexAbstraction { private final org.opensearch.cluster.metadata.DataStream dataStream; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java index 85a203e5e059a..b4f8b6b188531 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.cluster.Diff; import org.opensearch.cluster.NamedDiff; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; @@ -68,8 +69,9 @@ * tombstones remain in the cluster state for a fixed period of time, after which * they are purged. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class IndexGraveyard implements Metadata.Custom { /** @@ -191,8 +193,9 @@ public static IndexGraveyard.Builder builder(final IndexGraveyard graveyard) { /** * A class to build an IndexGraveyard. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class Builder { private List tombstones; private int numPurged = -1; @@ -367,8 +370,9 @@ public String getWriteableName() { /** * An individual tombstone entry for representing a deleted index. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class Tombstone implements ToXContentObject, Writeable { private static final String INDEX_KEY = "index"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 2e1421c278879..03784df509ed6 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -97,8 +97,9 @@ /** * Index metadata information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexMetadata implements Diffable, ToXContentFragment { public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock( @@ -160,8 +161,9 @@ public class IndexMetadata implements Diffable, ToXContentFragmen /** * The state of the index. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum State { OPEN((byte) 0), CLOSE((byte) 1); @@ -1211,8 +1213,9 @@ public static Builder builder(IndexMetadata indexMetadata) { /** * Builder of index metadata. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private String index; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 70c1d059a1b9e..1871ed24973c2 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -49,6 +49,7 @@ import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; @@ -98,8 +99,9 @@ /** * Metadata information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Metadata implements Iterable<IndexMetadata>, Diffable<Metadata>, ToXContentFragment { private static final Logger logger = LogManager.getLogger(Metadata.class); @@ -127,8 +129,9 @@ public boolean isSegmentReplicationEnabled(String indexName) { /** * Context of the XContent. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum XContentContext { /* Custom metadata should be returned as part of API calls */ API, @@ -166,8 +169,9 @@ public enum XContentContext { /** * Custom metadata. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Custom extends NamedDiffable<Custom>, ToXContentFragment, ClusterState.FeatureAware { EnumSet<XContentContext> context(); @@ -1129,8 +1133,9 @@ public static Builder builder(Metadata metadata) { /** * Builder of metadata. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private String clusterUUID; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 8d76a39712ee3..bb1bf94f5e984 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -891,7 +891,7 @@ static Settings aggregateIndexSettings( indexSettingsBuilder.put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, request.getProvidedName()); indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); - updateReplicationStrategy(indexSettingsBuilder, request.settings(), settings); + updateReplicationStrategy(indexSettingsBuilder, request.settings(), settings, combinedTemplateSettings); updateRemoteStoreSettings(indexSettingsBuilder, settings); if (sourceMetadata != null) { @@ -934,17 +934,33 @@ static Settings aggregateIndexSettings( * @param settingsBuilder index settings builder to be updated with relevant settings * @param requestSettings settings passed in during index create request * @param clusterSettings cluster level settings + * @param combinedTemplateSettings combined settings from the templates that match the index */ - private static void updateReplicationStrategy(Settings.Builder settingsBuilder, Settings requestSettings, Settings clusterSettings) { + private static void updateReplicationStrategy( + Settings.Builder settingsBuilder, + Settings requestSettings, + Settings clusterSettings, + Settings combinedTemplateSettings + ) { + // The replication setting is applied in the following order:
+ // 1. Explicit index creation request parameter + // 2. Template property for replication type + // 3. Defaults to SEGMENT if remote store attributes are present on the cluster + // 4. Default cluster level setting + + final ReplicationType indexReplicationType; if (INDEX_REPLICATION_TYPE_SETTING.exists(requestSettings)) { - settingsBuilder.put(SETTING_REPLICATION_TYPE, INDEX_REPLICATION_TYPE_SETTING.get(requestSettings)); + indexReplicationType = INDEX_REPLICATION_TYPE_SETTING.get(requestSettings); + } else if (INDEX_REPLICATION_TYPE_SETTING.exists(combinedTemplateSettings)) { + indexReplicationType = INDEX_REPLICATION_TYPE_SETTING.get(combinedTemplateSettings); } else if (CLUSTER_REPLICATION_TYPE_SETTING.exists(clusterSettings)) { - settingsBuilder.put(SETTING_REPLICATION_TYPE, CLUSTER_REPLICATION_TYPE_SETTING.get(clusterSettings)); + indexReplicationType = CLUSTER_REPLICATION_TYPE_SETTING.get(clusterSettings); } else if (isRemoteStoreAttributePresent(clusterSettings)) { - settingsBuilder.put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); + indexReplicationType = ReplicationType.SEGMENT; } else { - settingsBuilder.put(SETTING_REPLICATION_TYPE, CLUSTER_REPLICATION_TYPE_SETTING.getDefault(clusterSettings)); + indexReplicationType = CLUSTER_REPLICATION_TYPE_SETTING.getDefault(clusterSettings); } + settingsBuilder.put(SETTING_REPLICATION_TYPE, indexReplicationType); } /** @@ -1496,7 +1512,7 @@ public static void validateTranslogRetentionSettings(Settings indexSettings) { * @param requestSettings settings passed in during index create/update request * @param clusterSettings cluster settings */ - static void validateRefreshIntervalSettings(Settings requestSettings, ClusterSettings clusterSettings) { + public static void validateRefreshIntervalSettings(Settings requestSettings, ClusterSettings clusterSettings) { if (IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.exists(requestSettings) == false) { return; } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 1093ac09777e7..5b03d3f7b19ce 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -93,6 +93,7 @@ import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.MetadataCreateDataStreamService.validateTimestampFieldMapping; +import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; import static org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED; /** @@ -1529,6 +1530,9 @@ private void validate(String name, @Nullable Settings settings, List ind Optional.empty() ); validationErrors.addAll(indexSettingsValidation); + + // validate index refresh interval settings + validateRefreshIntervalSettings(settings, clusterService.getClusterSettings()); } if (indexPatterns.stream().anyMatch(Regex::isMatchAllPattern)) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Template.java b/server/src/main/java/org/opensearch/cluster/metadata/Template.java index 8e367c71ed166..bd110c6af8975 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Template.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.common.Nullable;
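The four-level precedence that updateReplicationStrategy now encodes means a request-level setting always wins. A usage sketch, assuming the transport Client and a hypothetical index name, that pins DOCUMENT replication regardless of what matching templates or cluster defaults say:

```java
import org.opensearch.action.admin.indices.create.CreateIndexRequest;
import org.opensearch.client.Client;
import org.opensearch.common.settings.Settings;

public final class ReplicationOverride {
    // index.replication.type on the request sits at step 1 of the precedence
    // chain, ahead of template, remote-store, and cluster-level defaults.
    public static void createWithDocumentReplication(Client client) {
        CreateIndexRequest request = new CreateIndexRequest("audit-log").settings(
            Settings.builder().put("index.replication.type", "DOCUMENT")
        );
        client.admin().indices().create(request).actionGet();
    }
}
```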
+import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentHelper; @@ -58,8 +59,9 @@ * it is entirely independent from an index. It's a building block forming part of a regular index * template and a {@link ComponentTemplate}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Template extends AbstractDiffable