diff --git a/.ci/bwcVersions b/.ci/bwcVersions index a738eb54e17f6..980e08c696d54 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -36,3 +36,5 @@ BWC_VERSION: - "2.15.0" - "2.15.1" - "2.16.0" + - "2.16.1" + - "2.17.0" diff --git a/.ci/java-versions.properties b/.ci/java-versions.properties index f73122ee21a6b..e290bda773f68 100644 --- a/.ci/java-versions.properties +++ b/.ci/java-versions.properties @@ -13,7 +13,8 @@ # build and test OpenSearch for this branch. Valid Java versions # are 'java' or 'openjdk' followed by the major release number. -OPENSEARCH_BUILD_JAVA=openjdk11 +# See please https://docs.gradle.org/8.10/userguide/upgrading_version_8.html#minimum_daemon_jvm_version +OPENSEARCH_BUILD_JAVA=openjdk17 OPENSEARCH_RUNTIME_JAVA=java11 GRADLE_TASK=build GRADLE_EXTRA_ARGS= diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 1aefeee710f47..18a310862dfbb 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -11,17 +11,27 @@ # 3. Use the command palette to run the CODEOWNERS: Show owners of current file command, which will display all code owners for the current file. # Default ownership for all repo files -* @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +* @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jainankitk @kotwanikunal @linuxpi @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/modules/lang-painless/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/modules/parent-join/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah /modules/transport-netty4/ @peternied /plugins/identity-shiro/ @peternied +/server/src/internalClusterTest/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/internalClusterTest/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah + /server/src/main/java/org/opensearch/extensions/ @peternied /server/src/main/java/org/opensearch/identity/ @peternied -/server/src/main/java/org/opensearch/threadpool/ @peternied +/server/src/main/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/main/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/main/java/org/opensearch/threadpool/ @jed326 @peternied 
/server/src/main/java/org/opensearch/transport/ @peternied -/.github/ @peternied +/server/src/test/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah +/server/src/test/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah + +/.github/ @jed326 @peternied /MAINTAINERS.md @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gaobinlong @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah diff --git a/.github/benchmark-configs.json b/.github/benchmark-configs.json index 5b44198cd3b8e..8f4bad040fe44 100644 --- a/.github/benchmark-configs.json +++ b/.github/benchmark-configs.json @@ -14,7 +14,8 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" }, "id_2": { "description": "Indexing only configuration for HTTP_LOGS workload", @@ -30,7 +31,8 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" }, "id_3": { "description": "Search only test-procedure for NYC_TAXIS, uses snapshot to restore the data for OS-3.0.0", @@ -46,7 +48,8 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, "id_4": { "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-3.0.0", @@ -62,10 +65,11 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, "id_5": { - "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-3.0.0", + "description": "Search only test-procedure for big5, uses snapshot to restore the data for OS-3.0.0", "supported_major_versions": ["3"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", @@ -78,7 +82,8 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, "id_6": { "description": "Search only test-procedure for NYC_TAXIS, uses snapshot to restore the data for OS-2.x", @@ -94,7 +99,8 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, "id_7": { "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-2.x", @@ -110,10 +116,11 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, "id_8": { - "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to 
restore the data for OS-2.x", + "description": "Search only test-procedure for big5, uses snapshot to restore the data for OS-2.x", "supported_major_versions": ["2"], "cluster-benchmark-configs": { "SINGLE_NODE_CLUSTER": "true", @@ -126,7 +133,8 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" }, "id_9": { "description": "Indexing and search configuration for pmc workload", @@ -141,7 +149,8 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" }, "id_10": { "description": "Indexing only configuration for stack-overflow workload", @@ -156,6 +165,7 @@ "cluster_configuration": { "size": "Single-Node", "data_instance_config": "4vCPU, 32G Mem, 16G Heap" - } + }, + "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" } } diff --git a/.github/workflows/add-performance-comment.yml b/.github/workflows/add-performance-comment.yml index fc272714c5628..6a310bff4c0a1 100644 --- a/.github/workflows/add-performance-comment.yml +++ b/.github/workflows/add-performance-comment.yml @@ -16,7 +16,7 @@ jobs: steps: - name: Add comment to PR - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: github-token: ${{secrets.GITHUB_TOKEN}} script: | diff --git a/.github/workflows/assemble.yml b/.github/workflows/assemble.yml index 51ae075ffa2c9..200d9cf2d788b 100644 --- a/.github/workflows/assemble.yml +++ b/.github/workflows/assemble.yml @@ -16,6 +16,17 @@ jobs: with: java-version: ${{ matrix.java }} distribution: temurin + - name: Set up JDK 17 + # See please https://docs.gradle.org/8.10/userguide/upgrading_version_8.html#minimum_daemon_jvm_version + if: matrix.java == 11 + uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: temurin + - name: Set JAVA${{ matrix.java }}_HOME + shell: bash + run: | + echo "JAVA${{ matrix.java }}_HOME=$JAVA_HOME_${{ matrix.java }}_${{ runner.arch }}" >> $GITHUB_ENV - name: Setup docker (missing on MacOS) id: setup_docker if: runner.os == 'macos' @@ -30,10 +41,11 @@ jobs: # Report success even if previous step failed (Docker on MacOS runner is very unstable) exit 0; - name: Run Gradle (assemble) + shell: bash if: runner.os != 'macos' run: | - ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE + ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE -Druntime.java=${{ matrix.java }} - name: Run Gradle (assemble) if: runner.os == 'macos' && steps.setup_docker.outcome == 'success' run: | - ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE + ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE -Druntime.java=${{ matrix.java }} diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml index 47abcc1178572..c494df6e27ce3 100644 --- a/.github/workflows/benchmark-pull-request.yml +++ b/.github/workflows/benchmark-pull-request.yml @@ -25,7 +25,7 @@ jobs: echo "USER_TAGS=pull_request_number:${{ github.event.issue.number }},repository:OpenSearch" >> $GITHUB_ENV - name: Check comment format id: check_comment - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: script: | const fs = require('fs'); @@ -60,9 +60,12 @@ jobs: for (const [key, value] of Object.entries(clusterBenchmarkConfigs)) { core.exportVariable(key, value); 
} + if (benchmarkConfigs[configId].hasOwnProperty('baseline_cluster_config')) { + core.exportVariable('BASELINE_CLUSTER_CONFIG', benchmarkConfigs[configId]['baseline_cluster_config']); + } - name: Post invalid format comment if: steps.check_comment.outputs.invalid == 'true' - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: github-token: ${{secrets.GITHUB_TOKEN}} script: | @@ -105,13 +108,25 @@ jobs: echo "prHeadRepo=$headRepo" >> $GITHUB_ENV echo "prHeadRefSha=$headRefSha" >> $GITHUB_ENV - id: get_approvers - run: | - echo "approvers=$(cat .github/CODEOWNERS | grep '^\*' | tr -d '* ' | sed 's/@/,/g' | sed 's/,//1')" >> $GITHUB_OUTPUT + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + result-encoding: string + script: | + // Get the collaborators - filtered to maintainer permissions + const maintainersResponse = await github.request('GET /repos/{owner}/{repo}/collaborators', { + owner: context.repo.owner, + repo: context.repo.repo, + permission: 'maintain', + affiliation: 'all', + per_page: 100 + }); + return maintainersResponse.data.map(item => item.login).join(', '); - uses: trstringer/manual-approval@v1 - if: (!contains(steps.get_approvers.outputs.approvers, github.event.comment.user.login)) + if: (!contains(steps.get_approvers.outputs.result, github.event.comment.user.login)) with: secret: ${{ github.TOKEN }} - approvers: ${{ steps.get_approvers.outputs.approvers }} + approvers: ${{ steps.get_approvers.outputs.result }} minimum-approvals: 1 issue-title: 'Request to approve/deny benchmark run for PR #${{ env.PR_NUMBER }}' issue-body: "Please approve or deny the benchmark run for PR #${{ env.PR_NUMBER }}" @@ -123,9 +138,10 @@ jobs: ref: ${{ env.prHeadRefSha }} token: ${{ secrets.GITHUB_TOKEN }} - name: Setup Java - uses: actions/setup-java@v1 + uses: actions/setup-java@v4 with: java-version: 21 + distribution: 'temurin' - name: Build and Assemble OpenSearch from PR run: | ./gradlew :distribution:archives:linux-tar:assemble -Dbuild.snapshot=false @@ -150,7 +166,7 @@ jobs: cat $GITHUB_ENV bash opensearch-build/scripts/benchmark/benchmark-pull-request.sh ${{ secrets.JENKINS_PR_BENCHMARK_GENERIC_WEBHOOK_TOKEN }} - name: Update PR with Job Url - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index cf9343c2c3aac..cd0415119282c 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -27,18 +27,29 @@ jobs: continue-on-error: true - run: | # The check was possibly skipped leading to success for both the jobs + has_backport_label=${{ contains(join(github.event.pull_request.labels.*.name, ', '), 'backport')}} + has_breaking_label=${{ contains(join(github.event.pull_request.labels.*.name, ', '), '>breaking')}} + if [[ $has_breaking_label == true && $has_backport_label == true ]]; then + echo "error: Please make sure that the PR does not have a backport label associated with it when making breaking changes" + exit 1 + fi + if [[ ${{ steps.verify-changelog-3x.outcome }} == 'success' && ${{ steps.verify-changelog.outcome }} == 'success' ]]; then exit 0 fi - + if [[ ${{ steps.verify-changelog-3x.outcome }} == 'failure' && ${{ steps.verify-changelog.outcome }} == 'failure' ]]; then echo "error: Please ensure a changelog entry exists in CHANGELOG.md or CHANGELOG-3.0.md" exit 1 fi - + # Concatenates the labels and checks if the string 
contains "backport" - has_backport_label=${{ contains(join(github.event.pull_request.labels.*.name, ', '), 'backport')}} if [[ ${{ steps.verify-changelog.outcome }} == 'success' && $has_backport_label == false ]]; then echo "error: Please make sure that the PR has a backport label associated with it when making an entry to the CHANGELOG.md file" exit 1 fi + + if [[ ${{ steps.verify-changelog-3x.outcome }} == 'success' && $has_backport_label == true ]]; then + echo "error: Please make sure that the PR does not have a backport label associated with it when making an entry to the CHANGELOG-3.0.md file" + exit 1 + fi diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml index cfd3ac81c5bd3..25abd99cadb96 100644 --- a/.github/workflows/dependabot_pr.yml +++ b/.github/workflows/dependabot_pr.yml @@ -22,6 +22,13 @@ jobs: with: token: ${{ steps.github_app_token.outputs.token }} + # See please https://docs.gradle.org/8.10/userguide/upgrading_version_8.html#minimum_daemon_jvm_version + - name: Set up JDK 17 + uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: temurin + - name: Update Gradle SHAs run: | ./gradlew updateSHAs diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 89d894403ff1a..1b9b30625eb83 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@v4 - name: Get changed files id: changed-files-specific - uses: tj-actions/changed-files@v44 + uses: tj-actions/changed-files@v45 with: files_ignore: | release-notes/*.md diff --git a/.github/workflows/maintainer-approval.yml b/.github/workflows/maintainer-approval.yml index fdc2bf16937b4..34e8f57cc1878 100644 --- a/.github/workflows/maintainer-approval.yml +++ b/.github/workflows/maintainer-approval.yml @@ -9,7 +9,7 @@ jobs: runs-on: ubuntu-latest steps: - id: find-maintainers - uses: actions/github-script@v7.0.1 + uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} result-encoding: string diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index 95ca49ac9cb43..7c65df1f677a5 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -8,7 +8,7 @@ jobs: strategy: matrix: java: [ 11, 17, 21 ] - os: [ubuntu-latest, windows-latest, macos-13] + os: [ubuntu-latest, windows-latest, macos-latest, macos-13] steps: - uses: actions/checkout@v4 - name: Set up JDK ${{ matrix.java }} @@ -17,6 +17,18 @@ jobs: java-version: ${{ matrix.java }} distribution: temurin cache: gradle + - name: Set up JDK 17 + # See please https://docs.gradle.org/8.10/userguide/upgrading_version_8.html#minimum_daemon_jvm_version + if: matrix.java == 11 + uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: temurin + - name: Set JAVA${{ matrix.java }}_HOME + shell: bash + run: | + echo "JAVA${{ matrix.java }}_HOME=$JAVA_HOME_${{ matrix.java }}_${{ runner.arch }}" >> $GITHUB_ENV - name: Run Gradle (precommit) + shell: bash run: | - ./gradlew javadoc precommit --parallel + ./gradlew javadoc precommit --parallel -Druntime.java=${{ matrix.java }} diff --git a/.github/workflows/triage.yml b/.github/workflows/triage.yml index 83bf4926a8c2d..c305818bdb0a9 100644 --- a/.github/workflows/triage.yml +++ b/.github/workflows/triage.yml @@ -9,7 +9,7 @@ jobs: if: github.repository == 'opensearch-project/OpenSearch' runs-on: ubuntu-latest steps: - - uses: actions/github-script@v7.0.1 + - uses: actions/github-script@v7 with: script: | 
const { issue, repository } = context.payload; diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index 7f120b65d7c2e..2de54716256ff 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -129,7 +129,7 @@ jobs: - name: Create tracking issue id: create-issue - uses: actions/github-script@v7.0.1 + uses: actions/github-script@v7 with: script: | const body = ` diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 06b761b1df8bd..48d978bede420 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -43,6 +43,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remove LegacyESVersion.V_7_10_ Constants ([#5018](https://github.com/opensearch-project/OpenSearch/pull/5018)) - Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021)) - Remove custom Map, List and Set collection classes ([#6871](https://github.com/opensearch-project/OpenSearch/pull/6871)) +- Remove `index.store.hybrid.mmap.extensions` setting in favor of `index.store.hybrid.nio.extensions` setting ([#9392](https://github.com/opensearch-project/OpenSearch/pull/9392)) ### Fixed - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) diff --git a/CHANGELOG.md b/CHANGELOG.md index e88a084f7d7f6..3dff44ed96dfd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,17 +5,76 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- [Offline Nodes] Adds offline-tasks library containing various interfaces to be used for Offline Background Tasks. ([#13574](https://github.com/opensearch-project/OpenSearch/pull/13574)) +- Fix for hasInitiatedFetching to fix allocation explain and manual reroute APIs ([#14972](https://github.com/opensearch-project/OpenSearch/pull/14972)) +- [Workload Management] Add queryGroupId to Task ([#14708](https://github.com/opensearch-project/OpenSearch/pull/14708)) +- Add setting to ignore throttling nodes for allocation of unassigned primaries in remote restore ([#14991](https://github.com/opensearch-project/OpenSearch/pull/14991)) +- [Workload Management] Add Delete QueryGroup API Logic ([#14735](https://github.com/opensearch-project/OpenSearch/pull/14735)) +- [Streaming Indexing] Enhance RestClient with a new streaming API support ([#14437](https://github.com/opensearch-project/OpenSearch/pull/14437)) +- Add basic aggregation support for derived fields ([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618)) +- [Workload Management] Add Create QueryGroup API Logic ([#14680](https://github.com/opensearch-project/OpenSearch/pull/14680)) +- Add ThreadContextPermission for markAsSystemContext and allow core to perform the method ([#15016](https://github.com/opensearch-project/OpenSearch/pull/15016)) +- Add ThreadContextPermission for stashAndMergeHeaders and stashWithOrigin ([#15039](https://github.com/opensearch-project/OpenSearch/pull/15039)) +- [Concurrent Segment Search] Support composite aggregations with scripting ([#15072](https://github.com/opensearch-project/OpenSearch/pull/15072)) +- Add `rangeQuery` and `regexpQuery` for `constant_keyword` field type ([#14711](https://github.com/opensearch-project/OpenSearch/pull/14711)) +- Add took time to request nodes stats
([#15054](https://github.com/opensearch-project/OpenSearch/pull/15054)) +- [Workload Management] Add Get QueryGroup API Logic ([#14709](https://github.com/opensearch-project/OpenSearch/pull/14709)) +- [Workload Management] Add Settings for Workload Management feature ([#15028](https://github.com/opensearch-project/OpenSearch/pull/15028)) +- [Workload Management] QueryGroup resource tracking framework changes ([#13897](https://github.com/opensearch-project/OpenSearch/pull/13897)) +- Support filtering on a large list encoded by bitmap ([#14774](https://github.com/opensearch-project/OpenSearch/pull/14774)) +- Add slice execution listeners to SearchOperationListener interface ([#15153](https://github.com/opensearch-project/OpenSearch/pull/15153)) +- Add allowlist setting for ingest-geoip and ingest-useragent ([#15325](https://github.com/opensearch-project/OpenSearch/pull/15325)) +- Adding access to noSubMatches and noOverlappingMatches in Hyphenation ([#13895](https://github.com/opensearch-project/OpenSearch/pull/13895)) +- Add support for index level max slice count setting for concurrent segment search ([#15336](https://github.com/opensearch-project/OpenSearch/pull/15336)) +- Add concurrent search support for Derived Fields ([#15326](https://github.com/opensearch-project/OpenSearch/pull/15326)) ### Dependencies -- Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.15.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861)) +- Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081)) +- Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.16.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861), [#15205](https://github.com/opensearch-project/OpenSearch/pull/15205)) +- OpenJDK Update (July 2024 Patch releases) ([#14998](https://github.com/opensearch-project/OpenSearch/pull/14998)) +- Bump `com.microsoft.azure:msal4j` from 1.16.1 to 1.17.0 ([#14995](https://github.com/opensearch-project/OpenSearch/pull/14995), [#15420](https://github.com/opensearch-project/OpenSearch/pull/15420)) +- Bump `actions/github-script` from 6 to 7 ([#14997](https://github.com/opensearch-project/OpenSearch/pull/14997)) +- Bump `org.tukaani:xz` from 1.9 to 1.10 ([#15110](https://github.com/opensearch-project/OpenSearch/pull/15110)) +- Bump `actions/setup-java` from 1 to 4 ([#15104](https://github.com/opensearch-project/OpenSearch/pull/15104)) +- Bump `org.apache.avro:avro` from 1.11.3 to 1.12.0 in /plugins/repository-hdfs ([#15119](https://github.com/opensearch-project/OpenSearch/pull/15119)) +- Bump `org.bouncycastle:bcpg-fips` from 1.0.7.1 to 2.0.9 ([#15103](https://github.com/opensearch-project/OpenSearch/pull/15103), [#15299](https://github.com/opensearch-project/OpenSearch/pull/15299)) +- Bump `com.azure:azure-core` from 1.49.1 to 1.51.0 ([#15111](https://github.com/opensearch-project/OpenSearch/pull/15111)) +- Bump `org.xerial.snappy:snappy-java` from 1.1.10.5 to 1.1.10.6 ([#15207](https://github.com/opensearch-project/OpenSearch/pull/15207)) +- Bump `com.azure:azure-xml` from 1.0.0 to 1.1.0 ([#15206](https://github.com/opensearch-project/OpenSearch/pull/15206)) +- Bump `reactor` from 3.5.19 to 3.5.20 ([#15262](https://github.com/opensearch-project/OpenSearch/pull/15262)) +- Bump `reactor-netty` from 1.1.21 to 1.1.22 ([#15262](https://github.com/opensearch-project/OpenSearch/pull/15262)) +- Bump `org.apache.kerby:kerb-admin` from 2.0.3 to 2.1.0
([#15301](https://github.com/opensearch-project/OpenSearch/pull/15301)) +- Bump `com.azure:azure-core-http-netty` from 1.15.1 to 1.15.3 ([#15300](https://github.com/opensearch-project/OpenSearch/pull/15300)) +- Bump `com.gradle.develocity` from 3.17.6 to 3.18 ([#15297](https://github.com/opensearch-project/OpenSearch/pull/15297)) +- Bump `commons-cli:commons-cli` from 1.8.0 to 1.9.0 ([#15298](https://github.com/opensearch-project/OpenSearch/pull/15298)) +- Bump `opentelemetry` from 1.40.0 to 1.41.0 ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) +- Bump `opentelemetry-semconv` from 1.26.0-alpha to 1.27.0-alpha ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) +- Bump `tj-actions/changed-files` from 44 to 45 ([#15422](https://github.com/opensearch-project/OpenSearch/pull/15422)) +- Bump `dnsjava:dnsjava` from 3.6.0 to 3.6.1 ([#15418](https://github.com/opensearch-project/OpenSearch/pull/15418)) +- Bump `com.netflix.nebula.ospackage-base` from 11.9.1 to 11.10.0 ([#15419](https://github.com/opensearch-project/OpenSearch/pull/15419)) +- Bump `org.roaringbitmap:RoaringBitmap` from 1.1.0 to 1.2.1 ([#15423](https://github.com/opensearch-project/OpenSearch/pull/15423)) ### Changed +- Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979)) +- Optimize regexp-based include/exclude on aggregations when pattern matches prefixes ([#14371](https://github.com/opensearch-project/OpenSearch/pull/14371)) +- Replace and block usages of org.apache.logging.log4j.util.Strings ([#15238](https://github.com/opensearch-project/OpenSearch/pull/15238)) ### Deprecated ### Removed ### Fixed +- Fix constraint bug which allows more primary shards than average primary shards per index ([#14908](https://github.com/opensearch-project/OpenSearch/pull/14908)) +- Fix NPE when bulk ingest with empty pipeline ([#15033](https://github.com/opensearch-project/OpenSearch/pull/15033)) +- Fix missing value of FieldSort for unsigned_long ([#14963](https://github.com/opensearch-project/OpenSearch/pull/14963)) +- Fix delete index template failed when the index template matches a data stream but is unused ([#15080](https://github.com/opensearch-project/OpenSearch/pull/15080)) +- Fix array_index_out_of_bounds_exception when indexing documents with field name containing only dot ([#15126](https://github.com/opensearch-project/OpenSearch/pull/15126)) +- Fixed array field name omission in flat_object function for nested JSON ([#13620](https://github.com/opensearch-project/OpenSearch/pull/13620)) +- Fix range aggregation optimization ignoring top level queries ([#15194](https://github.com/opensearch-project/OpenSearch/pull/15194)) +- Fix incorrect parameter names in MinHash token filter configuration handling ([#15233](https://github.com/opensearch-project/OpenSearch/pull/15233)) +- Fix indexing error when flat_object field is explicitly null ([#15375](https://github.com/opensearch-project/OpenSearch/pull/15375)) +- Fix split response processor not included in allowlist ([#15393](https://github.com/opensearch-project/OpenSearch/pull/15393)) +- Fix unchecked cast in dynamic action map getter ([#15394](https://github.com/opensearch-project/OpenSearch/pull/15394)) ### Security diff --git a/MAINTAINERS.md b/MAINTAINERS.md index f77c69ddeff2a..4a8aa9305df74 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -5,10 +5,11 @@ This document contains a list of maintainers in this repo. 
See [opensearch-proje ## Current Maintainers | Maintainer | GitHub ID | Affiliation | -| ------------------------ | ------------------------------------------------------- | ----------- | +|--------------------------|---------------------------------------------------------|-------------| | Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon | | Andrew Ross | [andrross](https://github.com/andrross) | Amazon | | Andriy Redko | [reta](https://github.com/reta) | Aiven | +| Ankit Jain | [jainankitk](https://github.com/jainankitk) | Amazon | | Ashish Singh | [ashking94](https://github.com/ashking94) | Amazon | | Bukhtawar Khan | [Bukhtawar](https://github.com/Bukhtawar) | Amazon | | Charlotte Henkle | [CEHENKLE](https://github.com/CEHENKLE) | Amazon | @@ -18,9 +19,10 @@ This document contains a list of maintainers in this repo. See [opensearch-proje | Gaurav Bafna | [gbbafna](https://github.com/gbbafna) | Amazon | | Jay Deng | [jed326](https://github.com/jed326) | Amazon | | Kunal Kotwani | [kotwanikunal](https://github.com/kotwanikunal) | Amazon | +| Varun Bansal | [linuxpi](https://github.com/linuxpi) | Amazon | | Marc Handalian | [mch2](https://github.com/mch2) | Amazon | | Michael Froh | [msfroh](https://github.com/msfroh) | Amazon | -| Nick Knize | [nknize](https://github.com/nknize) | Amazon | +| Nick Knize | [nknize](https://github.com/nknize) | Lucenia | | Owais Kazi | [owaiskazi19](https://github.com/owaiskazi19) | Amazon | | Peter Nied | [peternied](https://github.com/peternied) | Amazon | | Rishikesh Pasham | [Rishikesh1159](https://github.com/Rishikesh1159) | Amazon | diff --git a/README.md b/README.md index 17af2911b9221..5d4a9a671c013 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ [![Security Vulnerabilities](https://img.shields.io/github/issues/opensearch-project/OpenSearch/security%20vulnerability?labelColor=red)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"security%20vulnerability") [![Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/issues) [![Open Pull Requests](https://img.shields.io/github/issues-pr/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/pulls) -[![2.14.1 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.14.1)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.14.1") +[![2.17.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.17.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.17.0") [![3.0.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v3.0.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v3.0.0") [![GHA gradle check](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml) [![GHA validate pull request](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml) diff --git a/TRIAGING.md b/TRIAGING.md index c7c07a8ce30bd..dddcbc15394ab 100644 --- a/TRIAGING.md +++ b/TRIAGING.md @@ -1,6 +1,6 @@ -The maintainers of the OpenSearch Repo seek to promote an inclusive and engaged community of contributors. 
In order to facilitate this, weekly triage meetings are open-to-all and attendance is encouraged for anyone who hopes to contribute, discuss an issue, or learn more about the project. There are several weekly triage meetings scoped to the following component areas: Search, Storage, Cluster Manager, and finally "Core" as a catch-all for all other issues. To learn more about contributing to the OpenSearch Repo visit the [Contributing](./CONTRIBUTING.md) documentation. +The maintainers of the OpenSearch Repo seek to promote an inclusive and engaged community of contributors. In order to facilitate this, weekly triage meetings are open-to-all and attendance is encouraged for anyone who hopes to contribute, discuss an issue, or learn more about the project. There are several weekly triage meetings scoped to the following component areas: Search, Storage, and Cluster Manager. To learn more about contributing to the OpenSearch Repo visit the [Contributing](./CONTRIBUTING.md) documentation. ### Do I need to attend for my issue to be addressed/triaged? @@ -14,7 +14,7 @@ Each meeting we seek to address all new issues. However, should we run out of ti ### How do I join a Triage meeting? - Check the [OpenSearch Meetup Group](https://www.meetup.com/opensearch/) for the latest schedule and details for joining each meeting. Each component area has its own meetup series: [Search](https://www.meetup.com/opensearch/events/300929493/), [Storage](https://www.meetup.com/opensearch/events/299907409/), [Cluster Manager](https://www.meetup.com/opensearch/events/301082218/), [Indexing](https://www.meetup.com/opensearch/events/301734024/), and [Core](https://www.meetup.com/opensearch/events/301061009/). + Check the [OpenSearch Meetup Group](https://www.meetup.com/opensearch/) for the latest schedule and details for joining each meeting. Each component area has its own meetup series: [Search](https://www.meetup.com/opensearch/events/300929493/), [Storage](https://www.meetup.com/opensearch/events/299907409/), [Cluster Manager](https://www.meetup.com/opensearch/events/301082218/), and [Indexing](https://www.meetup.com/opensearch/events/301734024/). After joining the virtual meeting, you can enable your video / voice to join the discussion. If you do not have a webcam or microphone available, you can still join in via the text chat. 
@@ -31,11 +31,11 @@ Meeting structure may vary slightly, but the general structure is as follows: - [Search](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22) - [Indexing](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Indexing%3AReplication%22%2C%22Indexing%22%2C%22Indexing%3APerformance%22%2C%22Indexing+%26+Search%22%2C) - [Storage](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3AStorage%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22) - - [Cluster Manager](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22) - - [Core](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+-label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22%2C%22Storage%22%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22%2C%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22%2C%22Indexing%3AReplication%22%2C%22Indexing%22%2C%22Indexing%3APerformance%22%2C%22Indexing+%26+Search%22) + - [Cluster Manager](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22%2C%22ShardManagement%3AResiliency%22%2C%22ShardManagement%3AInsights%22%2C%22ShardManagement%3ASizing%22%2C%22ShardManagement%3APerformance%22%2C%22ShardManagement%3APlacement%22%2C%22ShardManagement%3ARouting%22) + - [Core](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+-label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22%2C%22Storage%22%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22%2C%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22%2C%22ShardManagement%3AResiliency%22%2C%22ShardManagement%3AInsights%22%2C%22ShardManagement%3ASizing%22%2C%22ShardManagement%3APerformance%22%2C%22ShardManagement%3APlacement%22%2C%22ShardManagement%3ARouting%22%2C%22Indexing%3AReplication%22%2C%22Indexing%22%2C%22Indexing%3APerformance%22%2C%22Indexing+%26+Search%22) 5. **Attendee Requests:** An opportunity for any meeting member to request consideration of an issue or pull request. 6. **Open Discussion:** Attendees can bring up any topics not already covered by filed issues or pull requests. -7. 
**Review of Old Untriaged Issues:** Time permitting, each meeting will look at all [untriaged issues older than 14 days](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+created%3A%3C2024-05-20) to prevent issues from falling through the cracks (note the GitHub API does not allow for relative times, so the date in this search must be updated every meeting). +7. **Review of Old Untriaged Issues:** Look at all [untriaged issues older than 14 days](https://peternied.github.io/redirect/issue_search.html?owner=opensearch-project&repo=OpenSearch&tag=untriaged&created-since-days=14) to prevent issues from falling through the cracks. ### What is the role of the facilitator? diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/RerouteBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/RerouteBenchmark.java new file mode 100644 index 0000000000000..e54bca579423b --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/RerouteBenchmark.java @@ -0,0 +1,135 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.benchmark.routing.allocation; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.common.logging.LogConfigurator; +import org.opensearch.common.settings.Settings; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; + +@Fork(1) +@Warmup(iterations = 3) +@Measurement(iterations = 3) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@State(Scope.Benchmark) +@SuppressWarnings("unused") // invoked by benchmarking framework +public class RerouteBenchmark { + @Param({ + // indices| nodes + " 10000| 500|", }) + public String indicesNodes = "1|1"; + public int numIndices; + public int numNodes; + public int numShards = 10; + public int numReplicas = 1; + + private AllocationService allocationService; + private ClusterState initialClusterState; + + @Setup + public void setUp() throws Exception { + LogConfigurator.setNodeName("test"); + final String[] params = indicesNodes.split("\\|"); + numIndices = toInt(params[0]); + numNodes = toInt(params[1]); + + int totalShardCount = (numReplicas + 1) * numShards * numIndices; + Metadata.Builder mb = Metadata.builder(); + for (int i = 1; i <= numIndices; i++) { + mb.put( 
+ IndexMetadata.builder("test_" + i) + .settings(Settings.builder().put("index.version.created", Version.CURRENT)) + .numberOfShards(numShards) + .numberOfReplicas(numReplicas) + ); + } + + Metadata metadata = mb.build(); + RoutingTable.Builder rb = RoutingTable.builder(); + for (int i = 1; i <= numIndices; i++) { + rb.addAsNew(metadata.index("test_" + i)); + } + RoutingTable routingTable = rb.build(); + initialClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(setUpClusterNodes(numNodes)) + .build(); + } + + @Benchmark + public ClusterState measureShardAllocationEmptyCluster() throws Exception { + ClusterState clusterState = initialClusterState; + allocationService = Allocators.createAllocationService( + Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.load_awareness.provisioned_capacity", numNodes) + .put("cluster.routing.allocation.load_awareness.skew_factor", "50") + .put("cluster.routing.allocation.node_concurrent_recoveries", "2") + .build() + ); + clusterState = allocationService.reroute(clusterState, "reroute"); + while (clusterState.getRoutingNodes().hasUnassignedShards()) { + clusterState = startInitializingShardsAndReroute(allocationService, clusterState); + } + return clusterState; + } + + private int toInt(String v) { + return Integer.valueOf(v.trim()); + } + + private DiscoveryNodes.Builder setUpClusterNodes(int nodes) { + DiscoveryNodes.Builder nb = DiscoveryNodes.builder(); + for (int i = 1; i <= nodes; i++) { + Map<String, String> attributes = new HashMap<>(); + attributes.put("zone", "zone_" + (i % 3)); + nb.add(Allocators.newNode("node_0_" + i, attributes)); + } + return nb; + } + + private static ClusterState startInitializingShardsAndReroute(AllocationService allocationService, ClusterState clusterState) { + return startShardsAndReroute(allocationService, clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + } + + private static ClusterState startShardsAndReroute( + AllocationService allocationService, + ClusterState clusterState, + List<ShardRouting> initializingShards + ) { + return allocationService.reroute(allocationService.applyStartedShards(clusterState, initializingShards), "reroute after starting"); + } +} diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index 6892af1b17f97..0502280cb69ad 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -158,7 +158,17 @@ private static List<String> resolveArchiveProjects(File checkoutDir projects.addAll(asList("deb", "rpm")); if (bwcVersion.onOrAfter("7.0.0")) { // starting with 7.0 we bundle a jdk which means we have platform-specific archives - projects.addAll(asList("darwin-tar", "linux-tar", "windows-zip")); + projects.addAll( + asList( + "darwin-tar", + "darwin-arm64-tar", + "linux-tar", + "linux-arm64-tar", + "linux-ppc64le-tar", + "linux-s390x-tar", + "windows-zip" + ) + ); } else { // prior to 7.0 we published only a single zip and tar archives projects.addAll(asList("zip", "tar")); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index
b2b3e3003e572..8d5ce9143cbac 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -77,9 +77,9 @@ import java.util.stream.Stream; public class DistroTestPlugin implements Plugin<Project> { - private static final String SYSTEM_JDK_VERSION = "21.0.3+9"; + private static final String SYSTEM_JDK_VERSION = "21.0.4+7"; private static final String SYSTEM_JDK_VENDOR = "adoptium"; - private static final String GRADLE_JDK_VERSION = "21.0.3+9"; + private static final String GRADLE_JDK_VERSION = "21.0.4+7"; private static final String GRADLE_JDK_VENDOR = "adoptium"; // all distributions used by distro tests. this is temporary until tests are per distribution diff --git a/buildSrc/src/main/resources/forbidden/opensearch-all-signatures.txt b/buildSrc/src/main/resources/forbidden/opensearch-all-signatures.txt index f9f24fd1e2367..199e206450178 100644 --- a/buildSrc/src/main/resources/forbidden/opensearch-all-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/opensearch-all-signatures.txt @@ -17,6 +17,9 @@ java.nio.file.Paths @ Use org.opensearch.common.io.PathUtils.get() instead. java.nio.file.FileSystems#getDefault() @ use org.opensearch.common.io.PathUtils.getDefaultFileSystem() instead. +joptsimple.internal.Strings @ use org.opensearch.core.common.Strings instead. +org.apache.logging.log4j.util.Strings @ use org.opensearch.core.common.Strings instead. + java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.opensearch.env.Environment.getFileStore() instead, impacted by JDK-8034057 java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.opensearch.env.Environment.isWritable() instead, impacted by JDK-8034057 diff --git a/buildSrc/src/main/resources/minimumCompilerVersion b/buildSrc/src/main/resources/minimumCompilerVersion index 8351c19397f4f..98d9bcb75a685 100644 --- a/buildSrc/src/main/resources/minimumCompilerVersion +++ b/buildSrc/src/main/resources/minimumCompilerVersion @@ -1 +1 @@ -14 +17 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 855ccc1f87413..ccec8e2891a65 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -2,7 +2,7 @@ opensearch = 3.0.0 lucene = 9.12.0-snapshot-847316d bundled_jdk_vendor = adoptium -bundled_jdk = 21.0.3+9 +bundled_jdk = 21.0.4+7 # optional dependencies spatial4j = 0.7 @@ -29,16 +29,16 @@ hdrhistogram = 2.2.2 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.111.Final +netty = 4.1.112.Final joda = 2.12.7 # project reactor -reactor_netty = 1.1.21 -reactor = 3.5.19 +reactor_netty = 1.1.22 +reactor = 3.5.20 # client dependencies -httpclient5 = 5.2.1 -httpcore5 = 5.2.2 +httpclient5 = 5.2.3 +httpcore5 = 5.2.5 httpclient = 4.5.14 httpcore = 4.4.16 httpasyncclient = 4.1.5 @@ -74,5 +74,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.40.0 -opentelemetrysemconv = 1.26.0-alpha +opentelemetry = 1.41.0 +opentelemetrysemconv = 1.27.0-alpha diff --git a/client/rest/build.gradle b/client/rest/build.gradle index f18df65dfddfa..93faf0024b51e 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -47,10 +47,15 @@ dependencies { api "org.apache.httpcomponents.client5:httpclient5:${versions.httpclient5}" api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" api "org.apache.httpcomponents.core5:httpcore5-h2:${versions.httpcore5}" + api
"org.apache.httpcomponents.core5:httpcore5-reactive:${versions.httpcore5}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" api "org.slf4j:slf4j-api:${versions.slf4j}" + // reactor + api "io.projectreactor:reactor-core:${versions.reactor}" + api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" + testImplementation project(":client:test") testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testImplementation "junit:junit:${versions.junit}" @@ -93,22 +98,52 @@ testingConventions { } } -thirdPartyAudit.ignoreMissingClasses( - 'org.conscrypt.Conscrypt', - 'org.slf4j.impl.StaticLoggerBinder', - 'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder', - //commons-logging optional dependencies - 'org.apache.avalon.framework.logger.Logger', - 'org.apache.log.Hierarchy', - 'org.apache.log.Logger', - 'org.apache.log4j.Level', - 'org.apache.log4j.Logger', - 'org.apache.log4j.Priority', - //commons-logging provided dependencies - 'javax.servlet.ServletContextEvent', - 'javax.servlet.ServletContextListener' -) +thirdPartyAudit { + ignoreMissingClasses( + 'org.conscrypt.Conscrypt', + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder', + //commons-logging optional dependencies + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + 'org.apache.log4j.Priority', + //commons-logging provided dependencies + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener', + 'io.micrometer.context.ContextAccessor', + 'io.micrometer.context.ContextRegistry', + 'io.micrometer.context.ContextSnapshot', + 'io.micrometer.context.ContextSnapshot$Scope', + 'io.micrometer.context.ContextSnapshotFactory', + 'io.micrometer.context.ContextSnapshotFactory$Builder', + 'io.micrometer.context.ThreadLocalAccessor', + 'io.micrometer.core.instrument.Clock', + 'io.micrometer.core.instrument.Counter', + 'io.micrometer.core.instrument.Counter$Builder', + 'io.micrometer.core.instrument.DistributionSummary', + 'io.micrometer.core.instrument.DistributionSummary$Builder', + 'io.micrometer.core.instrument.Meter', + 'io.micrometer.core.instrument.MeterRegistry', + 'io.micrometer.core.instrument.Metrics', + 'io.micrometer.core.instrument.Tag', + 'io.micrometer.core.instrument.Tags', + 'io.micrometer.core.instrument.Timer', + 'io.micrometer.core.instrument.Timer$Builder', + 'io.micrometer.core.instrument.Timer$Sample', + 'io.micrometer.core.instrument.binder.jvm.ExecutorServiceMetrics', + 'io.micrometer.core.instrument.composite.CompositeMeterRegistry', + 'io.micrometer.core.instrument.search.Search', + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration' + ) + ignoreViolations( + 'reactor.core.publisher.Traces$SharedSecretsCallSiteSupplierFactory$TracingException' + ) +} tasks.withType(JavaCompile) { // Suppressing '[options] target value 8 is obsolete and will be removed in a future release' diff --git a/client/rest/licenses/httpclient5-5.2.1.jar.sha1 b/client/rest/licenses/httpclient5-5.2.1.jar.sha1 deleted file mode 100644 index 3555fe22f8e12..0000000000000 --- a/client/rest/licenses/httpclient5-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0c900514d3446d9ce5d9dbd90c21192048125440 \ No newline at end of file diff --git 
a/client/rest/licenses/httpclient5-5.2.3.jar.sha1 b/client/rest/licenses/httpclient5-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..43e233e72001a --- /dev/null +++ b/client/rest/licenses/httpclient5-5.2.3.jar.sha1 @@ -0,0 +1 @@ +5d753a99d299756998a08c488f2efdf9cf26198e \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.2.2.jar.sha1 b/client/rest/licenses/httpcore5-5.2.2.jar.sha1 deleted file mode 100644 index b641256c7d4a4..0000000000000 --- a/client/rest/licenses/httpcore5-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6da28f5aa6c2b129ef49632e041a5203ce7507b2 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..ca97e8612ea39 --- /dev/null +++ b/client/rest/licenses/httpcore5-5.2.5.jar.sha1 @@ -0,0 +1 @@ +dab1e18842971a45ca8942491ce005ab86a028d7 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 deleted file mode 100644 index 94bc0fa49bdb0..0000000000000 --- a/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -54ee1ed58fe8ac40be1083ea9873a6c734939ab9 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..bb40fe65854f6 --- /dev/null +++ b/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 @@ -0,0 +1 @@ +09425df4d1365cee86a8e031a036bdca4343da4b \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..ab9241fc93d45 --- /dev/null +++ b/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 @@ -0,0 +1 @@ +f68949965075b957c12b4c1ef89fd4bab2a0fdb1 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-reactive-LICENSE.txt b/client/rest/licenses/httpcore5-reactive-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client/rest/licenses/httpcore5-reactive-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/rest/licenses/httpcore5-reactive-NOTICE.txt b/client/rest/licenses/httpcore5-reactive-NOTICE.txt new file mode 100644 index 0000000000000..fcf14beb5c1ec --- /dev/null +++ b/client/rest/licenses/httpcore5-reactive-NOTICE.txt @@ -0,0 +1,8 @@ + +Apache HttpComponents Core Reactive Extensions +Copyright 2005-2021 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + diff --git a/client/rest/licenses/reactive-streams-1.0.4.jar.sha1 b/client/rest/licenses/reactive-streams-1.0.4.jar.sha1 new file mode 100644 index 0000000000000..45a80e3f7e361 --- /dev/null +++ b/client/rest/licenses/reactive-streams-1.0.4.jar.sha1 @@ -0,0 +1 @@ +3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/client/rest/licenses/reactive-streams-LICENSE.txt b/client/rest/licenses/reactive-streams-LICENSE.txt new file mode 100644 index 0000000000000..1e3c7e7c77495 --- /dev/null +++ b/client/rest/licenses/reactive-streams-LICENSE.txt @@ -0,0 +1,21 @@ +MIT No Attribution + +Copyright 2014 Reactive Streams + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/client/rest/licenses/reactive-streams-NOTICE.txt b/client/rest/licenses/reactive-streams-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/client/rest/licenses/reactor-core-3.5.20.jar.sha1 b/client/rest/licenses/reactor-core-3.5.20.jar.sha1 new file mode 100644 index 0000000000000..0c80be89f66c8 --- /dev/null +++ b/client/rest/licenses/reactor-core-3.5.20.jar.sha1 @@ -0,0 +1 @@ +1fc0f91e2b93778a974339d2c24363d7f34f90b4 \ No newline at end of file diff --git a/client/rest/licenses/reactor-core-LICENSE.txt b/client/rest/licenses/reactor-core-LICENSE.txt new file mode 100644 index 0000000000000..e5583c184e67a --- /dev/null +++ b/client/rest/licenses/reactor-core-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/client/rest/licenses/reactor-core-NOTICE.txt b/client/rest/licenses/reactor-core-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/client/rest/src/main/java/org/opensearch/client/Cancellable.java b/client/rest/src/main/java/org/opensearch/client/Cancellable.java index 56e31a3742f35..d087c60927e3e 100644 --- a/client/rest/src/main/java/org/opensearch/client/Cancellable.java +++ b/client/rest/src/main/java/org/opensearch/client/Cancellable.java @@ -34,6 +34,8 @@ import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; import org.apache.hc.core5.concurrent.CancellableDependency; +import java.io.IOException; +import java.util.concurrent.Callable; import java.util.concurrent.CancellationException; /** @@ -77,7 +79,7 @@ public synchronized boolean cancel() { } /** - * Executes some arbitrary code iff the on-going request has not been cancelled, otherwise throws {@link CancellationException}. + * Executes some arbitrary code if the on-going request has not been cancelled, otherwise throws {@link CancellationException}. * This is needed to guarantee that cancelling a request works correctly even in case {@link #cancel()} is called between different * attempts of the same request. The low-level client reuses the same instance of the {@link CancellableDependency} by calling * {@link HttpUriRequestBase#reset()} between subsequent retries. The {@link #cancel()} method can be called at anytime, @@ -95,6 +97,31 @@ synchronized void runIfNotCancelled(Runnable runnable) { runnable.run(); } + /** + * Executes some arbitrary code if the on-going request has not been cancelled, otherwise throws {@link CancellationException}. + * This is needed to guarantee that cancelling a request works correctly even in case {@link #cancel()} is called between different + * attempts of the same request. The low-level client reuses the same instance of the {@link CancellableDependency} by calling + * {@link HttpUriRequestBase#reset()} between subsequent retries. The {@link #cancel()} method can be called at anytime, + * and we need to handle the case where it gets called while there is no request being executed as one attempt may have failed and + * the subsequent attempt has not been started yet. 
+     * If the request has already been cancelled we don't go ahead with the next attempt, and artificially raise the
+     * {@link CancellationException}, otherwise we run the provided {@link Callable} which will reset the request and send the next attempt.
+     * Note that this method must be synchronized as well as the {@link #cancel()} method, to prevent a request from being cancelled
+     * when there is no future to cancel, which would make cancelling the request a no-op.
+     */
+    synchronized <T> T callIfNotCancelled(Callable<T> callable) throws IOException {
+        if (this.httpRequest.isCancelled()) {
+            throw newCancellationException();
+        }
+        try {
+            return callable.call();
+        } catch (final IOException ex) {
+            throw ex;
+        } catch (final Exception ex) {
+            throw new IOException(ex);
+        }
+    }
+
     static CancellationException newCancellationException() {
         return new CancellationException("request was cancelled");
     }
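For context, a minimal sketch of how a retry loop can use the new method; the sendGuarded and sendNextAttempt helpers are hypothetical, only callIfNotCancelled comes from this change:

    // Each retry attempt is wrapped so that a cancel() issued between attempts
    // surfaces as a CancellationException instead of silently re-sending.
    Response sendGuarded(Cancellable cancellable) throws IOException {
        return cancellable.callIfNotCancelled(() -> sendNextAttempt()); // sendNextAttempt() is hypothetical
    }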
diff --git a/client/rest/src/main/java/org/opensearch/client/Response.java b/client/rest/src/main/java/org/opensearch/client/Response.java
index b062d937ed630..cb92e33e49156 100644
--- a/client/rest/src/main/java/org/opensearch/client/Response.java
+++ b/client/rest/src/main/java/org/opensearch/client/Response.java
@@ -40,11 +40,8 @@
 import org.apache.hc.core5.http.message.RequestLine;
 import org.apache.hc.core5.http.message.StatusLine;
 
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Objects;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
 
 /**
  * Holds an opensearch response. It wraps the {@link HttpResponse} returned and associates it with
@@ -116,79 +113,11 @@ public HttpEntity getEntity() {
         return response.getEntity();
     }
 
-    /**
-     * Optimized regular expression to test if a string matches the RFC 1123 date
-     * format (with quotes and leading space). Start/end of line characters and
-     * atomic groups are used to prevent backtracking.
-     */
-    private static final Pattern WARNING_HEADER_DATE_PATTERN = Pattern.compile("^ " + // start of line, leading space
-    // quoted RFC 1123 date format
-        "\"" + // opening quote
-        "(?>Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // day of week, atomic group to prevent backtracking
-        "\\d{2} " + // 2-digit day
-        "(?>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) " + // month, atomic group to prevent backtracking
-        "\\d{4} " + // 4-digit year
-        "\\d{2}:\\d{2}:\\d{2} " + // (two-digit hour):(two-digit minute):(two-digit second)
-        "GMT" + // GMT
-        "\"$"); // closing quote (optional, since an older version can still send a warn-date), end of line
-
-    /**
-     * Length of RFC 1123 format (with quotes and leading space), used in
-     * matchWarningHeaderPatternByPrefix(String).
-     */
-    private static final int WARNING_HEADER_DATE_LENGTH = 0 + 1 + 1 + 3 + 1 + 1 + 2 + 1 + 3 + 1 + 4 + 1 + 2 + 1 + 2 + 1 + 2 + 1 + 3 + 1;
-
-    /**
-     * Tests if a string matches the RFC 7234 specification for warning headers.
-     * This assumes that the warn code is always 299 and the warn agent is always
-     * OpenSearch.
-     *
-     * @param s the value of a warning header formatted according to RFC 7234
-     * @return {@code true} if the input string matches the specification
-     */
-    private static boolean matchWarningHeaderPatternByPrefix(final String s) {
-        return s.startsWith("299 OpenSearch-");
-    }
-
-    /**
-     * Refer to org.opensearch.common.logging.DeprecationLogger
-     */
-    private static String extractWarningValueFromWarningHeader(final String s) {
-        String warningHeader = s;
-
-        /*
-         * The following block tests for the existence of a RFC 1123 date in the warning header. If the date exists, it is removed for
-         * extractWarningValueFromWarningHeader(String) to work properly (as it does not handle dates).
-         */
-        if (s.length() > WARNING_HEADER_DATE_LENGTH) {
-            final String possibleDateString = s.substring(s.length() - WARNING_HEADER_DATE_LENGTH);
-            final Matcher matcher = WARNING_HEADER_DATE_PATTERN.matcher(possibleDateString);
-
-            if (matcher.matches()) {
-                warningHeader = warningHeader.substring(0, s.length() - WARNING_HEADER_DATE_LENGTH);
-            }
-        }
-
-        final int firstQuote = warningHeader.indexOf('\"');
-        final int lastQuote = warningHeader.length() - 1;
-        final String warningValue = warningHeader.substring(firstQuote + 1, lastQuote);
-        return warningValue;
-    }
-
     /**
      * Returns a list of all warning headers returned in the response.
      */
     public List<String> getWarnings() {
-        List<String> warnings = new ArrayList<>();
-        for (Header header : response.getHeaders("Warning")) {
-            String warning = header.getValue();
-            if (matchWarningHeaderPatternByPrefix(warning)) {
-                warnings.add(extractWarningValueFromWarningHeader(warning));
-            } else {
-                warnings.add(warning);
-            }
-        }
-        return warnings;
+        return ResponseWarningsExtractor.getWarnings(response);
     }
 
     /**
diff --git a/client/rest/src/main/java/org/opensearch/client/ResponseWarningsExtractor.java b/client/rest/src/main/java/org/opensearch/client/ResponseWarningsExtractor.java
new file mode 100644
index 0000000000000..441daff4f3af4
--- /dev/null
+++ b/client/rest/src/main/java/org/opensearch/client/ResponseWarningsExtractor.java
@@ -0,0 +1,99 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.client;
+
+import org.apache.hc.core5.http.Header;
+import org.apache.hc.core5.http.HttpResponse;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+final class ResponseWarningsExtractor {
+
+    /**
+     * Optimized regular expression to test if a string matches the RFC 1123 date
+     * format (with quotes and leading space). Start/end of line characters and
+     * atomic groups are used to prevent backtracking.
+     */
+    private static final Pattern WARNING_HEADER_DATE_PATTERN = Pattern.compile("^ " + // start of line, leading space
+    // quoted RFC 1123 date format
+        "\"" + // opening quote
+        "(?>Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // day of week, atomic group to prevent backtracking
+        "\\d{2} " + // 2-digit day
+        "(?>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) " + // month, atomic group to prevent backtracking
+        "\\d{4} " + // 4-digit year
+        "\\d{2}:\\d{2}:\\d{2} " + // (two-digit hour):(two-digit minute):(two-digit second)
+        "GMT" + // GMT
+        "\"$"); // closing quote (optional, since an older version can still send a warn-date), end of line
+
+    /**
+     * Length of RFC 1123 format (with quotes and leading space), used in
+     * matchWarningHeaderPatternByPrefix(String).
+     */
+    private static final int WARNING_HEADER_DATE_LENGTH = 0 + 1 + 1 + 3 + 1 + 1 + 2 + 1 + 3 + 1 + 4 + 1 + 2 + 1 + 2 + 1 + 2 + 1 + 3 + 1;
+
+    private ResponseWarningsExtractor() {}
+
+    /**
+     * Returns a list of all warning headers returned in the response.
+     * @param response HTTP response
+     */
+    static List<String> getWarnings(final HttpResponse response) {
+        List<String> warnings = new ArrayList<>();
+        for (Header header : response.getHeaders("Warning")) {
+            String warning = header.getValue();
+            if (matchWarningHeaderPatternByPrefix(warning)) {
+                warnings.add(extractWarningValueFromWarningHeader(warning));
+            } else {
+                warnings.add(warning);
+            }
+        }
+        return warnings;
+    }
+
+    /**
+     * Tests if a string matches the RFC 7234 specification for warning headers.
+     * This assumes that the warn code is always 299 and the warn agent is always
+     * OpenSearch.
+     *
+     * @param s the value of a warning header formatted according to RFC 7234
+     * @return {@code true} if the input string matches the specification
+     */
+    private static boolean matchWarningHeaderPatternByPrefix(final String s) {
+        return s.startsWith("299 OpenSearch-");
+    }
+
+    /**
+     * Refer to org.opensearch.common.logging.DeprecationLogger
+     */
+    private static String extractWarningValueFromWarningHeader(final String s) {
+        String warningHeader = s;
+
+        /*
+         * The following block tests for the existence of a RFC 1123 date in the warning header. If the date exists, it is removed for
+         * extractWarningValueFromWarningHeader(String) to work properly (as it does not handle dates).
+         */
+        if (s.length() > WARNING_HEADER_DATE_LENGTH) {
+            final String possibleDateString = s.substring(s.length() - WARNING_HEADER_DATE_LENGTH);
+            final Matcher matcher = WARNING_HEADER_DATE_PATTERN.matcher(possibleDateString);
+
+            if (matcher.matches()) {
+                warningHeader = warningHeader.substring(0, s.length() - WARNING_HEADER_DATE_LENGTH);
+            }
+        }
+
+        final int firstQuote = warningHeader.indexOf('\"');
+        final int lastQuote = warningHeader.length() - 1;
+        final String warningValue = warningHeader.substring(firstQuote + 1, lastQuote);
+        return warningValue;
+    }
+
+}
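To make the extraction rules concrete, a hypothetical deprecation warning from a 2.17.0 node would be reduced as follows (the header value is illustrative):

    // Warning: 299 OpenSearch-2.17.0 "setting [foo] is deprecated" "Mon, 29 Jul 2024 12:00:00 GMT"
    // matchWarningHeaderPatternByPrefix(..) accepts the value (it starts with "299 OpenSearch-"),
    // the trailing quoted RFC 1123 date is stripped, and getWarnings(..) yields:
    //   setting [foo] is deprecated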
diff --git a/client/rest/src/main/java/org/opensearch/client/RestClient.java b/client/rest/src/main/java/org/opensearch/client/RestClient.java
index 15905add76c4f..5c87e3fda5701 100644
--- a/client/rest/src/main/java/org/opensearch/client/RestClient.java
+++ b/client/rest/src/main/java/org/opensearch/client/RestClient.java
@@ -62,14 +62,19 @@
 import org.apache.hc.core5.http.HttpEntity;
 import org.apache.hc.core5.http.HttpHost;
 import org.apache.hc.core5.http.HttpRequest;
+import org.apache.hc.core5.http.HttpResponse;
+import org.apache.hc.core5.http.Message;
 import org.apache.hc.core5.http.io.entity.HttpEntityWrapper;
+import org.apache.hc.core5.http.message.BasicClassicHttpResponse;
 import org.apache.hc.core5.http.message.RequestLine;
 import org.apache.hc.core5.http.nio.AsyncRequestProducer;
 import org.apache.hc.core5.http.nio.AsyncResponseConsumer;
 import org.apache.hc.core5.net.URIBuilder;
+import org.apache.hc.core5.reactive.ReactiveResponseConsumer;
 import org.apache.hc.core5.reactor.IOReactorStatus;
 import org.apache.hc.core5.util.Args;
 import org.opensearch.client.http.HttpUriRequestProducer;
+import org.opensearch.client.http.ReactiveHttpUriRequestProducer;
 
 import javax.net.ssl.SSLHandshakeException;
 
@@ -83,6 +88,7 @@
 import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Base64;
@@ -98,6 +104,7 @@
 import java.util.Objects;
 import java.util.Optional;
 import java.util.Set;
+import java.util.concurrent.CancellationException;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ExecutionException;
@@ -106,6 +113,10 @@
 import java.util.stream.Collectors;
 import java.util.zip.GZIPOutputStream;
 
+import org.reactivestreams.Publisher;
+import reactor.core.publisher.Mono;
+import reactor.core.publisher.MonoSink;
+
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static java.util.Collections.singletonList;
 
@@ -300,6 +311,23 @@ public boolean isRunning() {
         return client.getStatus() == IOReactorStatus.ACTIVE;
     }
 
+    /**
+     * Sends a streaming request to the OpenSearch cluster that the client points to and returns streaming response. This is an experimental API.
+     * @param request streaming request
+     * @return streaming response
+     * @throws IOException IOException
+     */
+    public StreamingResponse<ByteBuffer> streamRequest(StreamingRequest<ByteBuffer> request) throws IOException {
+        final InternalStreamingRequest internalRequest = new InternalStreamingRequest(request);
+
+        final StreamingResponse<ByteBuffer> response = new StreamingResponse<>(
+            new RequestLine(internalRequest.httpRequest),
+            streamRequest(nextNodes(), internalRequest)
+        );
+
+        return response;
+    }
+
     /**
      * Sends a request to the OpenSearch cluster that the client points to.
      * Blocks until the request is completed and returns its response or fails
@@ -332,13 +360,13 @@ public Response performRequest(Request request) throws IOException {
 
     private Response performRequest(final NodeTuple<Iterator<Node>> nodeTuple, final InternalRequest request, Exception previousException)
         throws IOException {
-        RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache);
+        RequestContext<ClassicHttpResponse> context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache);
         ClassicHttpResponse httpResponse;
         try {
-            httpResponse = client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, null).get();
+            httpResponse = client.execute(context.requestProducer(), context.asyncResponseConsumer(), context.context(), null).get();
         } catch (Exception e) {
-            RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, e);
-            onFailure(context.node);
+            RequestLogger.logFailedRequest(logger, request.httpRequest, context.node(), e);
+            onFailure(context.node());
             Exception cause = extractAndWrapCause(e);
             addSuppressedException(previousException, cause);
             if (nodeTuple.nodes.hasNext()) {
@@ -352,7 +380,7 @@ private Response performRequest(final NodeTuple<Iterator<Node>> nodeTuple, final
             }
             throw new IllegalStateException("unexpected exception type: must be either RuntimeException or IOException", cause);
         }
-        ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse);
+        ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node(), httpResponse);
         if (responseOrResponseException.responseException == null) {
             return responseOrResponseException.response;
         }
@@ -363,6 +391,46 @@ private Response performRequest(final NodeTuple<Iterator<Node>> nodeTuple, final
         throw responseOrResponseException.responseException;
     }
 
+    private Publisher<Message<HttpResponse, Publisher<ByteBuffer>>> streamRequest(
+        final NodeTuple<Iterator<Node>> nodeTuple,
+        final InternalStreamingRequest request
+    ) throws IOException {
+        return request.cancellable.callIfNotCancelled(() -> {
+            final Node node = nodeTuple.nodes.next();
+
+            final Mono<Message<HttpResponse, Publisher<ByteBuffer>>> publisher = Mono.create(emitter -> {
+                final RequestContext<Void> context = request.createContextForNextAttempt(node, nodeTuple.authCache, emitter);
+                final Future<Void> future = client.execute(
+                    context.requestProducer(),
+                    context.asyncResponseConsumer(),
+                    context.context(),
+                    null
+                );
+
+                if (future instanceof org.apache.hc.core5.concurrent.Cancellable) {
+                    request.httpRequest.setDependency((org.apache.hc.core5.concurrent.Cancellable) future);
+                }
+            });
+
+            return publisher.flatMap(message -> {
+                try {
+                    final ResponseOrResponseException responseOrResponseException = convertResponse(request, node, message);
+                    if (responseOrResponseException.responseException == null) {
+                        return Mono.just(message);
+                    } else {
+                        if (nodeTuple.nodes.hasNext()) {
+                            return Mono.from(streamRequest(nodeTuple, request));
+                        } else {
+                            return Mono.error(responseOrResponseException.responseException);
+                        }
+                    }
+                } catch (final Exception ex) {
+                    return Mono.error(ex);
+                }
+            });
+        });
+    }
+
     private ResponseOrResponseException convertResponse(InternalRequest request, Node node, ClassicHttpResponse httpResponse)
         throws IOException {
         RequestLogger.logResponse(logger, request.httpRequest, node.getHost(), httpResponse);
@@ -393,6 +461,40 @@ private ResponseOrResponseException convertResponse(InternalRequest request, Nod
         throw responseException;
     }
 
+    private ResponseOrResponseException convertResponse(
+        InternalStreamingRequest request,
+        Node node,
+        Message<HttpResponse, Publisher<ByteBuffer>> message
+    ) throws IOException {
+
+        // Streaming Response could accumulate a lot of data so we may not be able to fully consume it.
+        final ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(
+            message.getHead().getCode(),
+            message.getHead().getReasonPhrase()
+        );
+        final Response response = new Response(new RequestLine(request.httpRequest), node.getHost(), httpResponse);
+
+        RequestLogger.logResponse(logger, request.httpRequest, node.getHost(), httpResponse);
+        int statusCode = httpResponse.getCode();
+
+        if (isSuccessfulResponse(statusCode) || request.ignoreErrorCodes.contains(response.getStatusLine().getStatusCode())) {
+            onResponse(node);
+            if (request.warningsHandler.warningsShouldFailRequest(response.getWarnings())) {
+                throw new WarningFailureException(response);
+            }
+            return new ResponseOrResponseException(response);
+        }
+        ResponseException responseException = new ResponseException(response);
+        if (isRetryStatus(statusCode)) {
+            // mark host dead and retry against next one
+            onFailure(node);
+            return new ResponseOrResponseException(responseException);
+        }
+        // mark host alive and don't retry, as the error should be a request problem
+        onResponse(node);
+        throw responseException;
+    }
+
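A minimal usage sketch of the experimental streaming entry point; the client variable, endpoint, and payload are illustrative, and Flux comes from reactor-core, which this change already pulls in:

    StreamingRequest<ByteBuffer> streamingRequest = new StreamingRequest<>(
        "POST",
        "/sample-index/_bulk",  // illustrative endpoint
        Flux.just(ByteBuffer.wrap("{\"index\":{}}\n{\"f\":1}\n".getBytes(StandardCharsets.UTF_8)))
    );
    StreamingResponse<ByteBuffer> streamingResponse = restClient.streamRequest(streamingRequest);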
     /**
      * Sends a request to the OpenSearch cluster that the client points to.
      * The request is executed asynchronously and the provided
@@ -427,16 +529,23 @@ private void performRequestAsync(
         final FailureTrackingResponseListener listener
     ) {
         request.cancellable.runIfNotCancelled(() -> {
-            final RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache);
+            final RequestContext<ClassicHttpResponse> context = request.createContextForNextAttempt(
+                nodeTuple.nodes.next(),
+                nodeTuple.authCache
+            );
             Future<ClassicHttpResponse> future = client.execute(
-                context.requestProducer,
-                context.asyncResponseConsumer,
-                context.context,
+                context.requestProducer(),
+                context.asyncResponseConsumer(),
+                context.context(),
                 new FutureCallback<ClassicHttpResponse>() {
                     @Override
                     public void completed(ClassicHttpResponse httpResponse) {
                         try {
-                            ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse);
+                            ResponseOrResponseException responseOrResponseException = convertResponse(
+                                request,
+                                context.node(),
+                                httpResponse
+                            );
                             if (responseOrResponseException.responseException == null) {
                                 listener.onSuccess(responseOrResponseException.response);
                             } else {
@@ -455,8 +564,8 @@ public void completed(ClassicHttpResponse httpResponse) {
                     @Override
                     public void failed(Exception failure) {
                         try {
-                            RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, failure);
-                            onFailure(context.node);
+                            RequestLogger.logFailedRequest(logger, request.httpRequest, context.node(), failure);
+                            onFailure(context.node());
                             if (nodeTuple.nodes.hasNext()) {
                                 listener.trackFailure(failure);
                                 performRequestAsync(nodeTuple, request, listener);
@@ -822,6 +931,66 @@ public void remove() {
         }
     }
 
+    private class InternalStreamingRequest {
+        private final StreamingRequest<ByteBuffer> request;
+        private final Set<Integer> ignoreErrorCodes;
+        private final HttpUriRequestBase httpRequest;
+        private final Cancellable cancellable;
+        private final WarningsHandler warningsHandler;
+
+        InternalStreamingRequest(StreamingRequest<ByteBuffer> request) {
+            this.request = request;
+            Map<String, String> params = new HashMap<>(request.getParameters());
+            // ignore is a special parameter supported by the clients, shouldn't be sent to es
+            String ignoreString = params.remove("ignore");
+            this.ignoreErrorCodes = getIgnoreErrorCodes(ignoreString, request.getMethod());
+            URI uri = buildUri(pathPrefix, request.getEndpoint(), params);
+            this.httpRequest = createHttpRequest(request.getMethod(), uri, null);
+            this.cancellable = Cancellable.fromRequest(httpRequest);
+            setHeaders(httpRequest, request.getOptions().getHeaders());
+            setRequestConfig(httpRequest, request.getOptions().getRequestConfig());
+            this.warningsHandler = request.getOptions().getWarningsHandler() == null
+                ? RestClient.this.warningsHandler
+                : request.getOptions().getWarningsHandler();
+        }
+
+        private void setHeaders(HttpRequest httpRequest, Collection<Header> requestHeaders) {
+            // request headers override default headers, so we don't add default headers if they exist as request headers
+            final Set<String> requestNames = new HashSet<>(requestHeaders.size());
+            for (Header requestHeader : requestHeaders) {
+                httpRequest.addHeader(requestHeader);
+                requestNames.add(requestHeader.getName());
+            }
+            for (Header defaultHeader : defaultHeaders) {
+                if (requestNames.contains(defaultHeader.getName()) == false) {
+                    httpRequest.addHeader(defaultHeader);
+                }
+            }
+            if (compressionEnabled) {
+                httpRequest.addHeader("Accept-Encoding", "gzip");
+            }
+        }
+
+        private void setRequestConfig(HttpUriRequestBase httpRequest, RequestConfig requestConfig) {
+            if (requestConfig != null) {
+                httpRequest.setConfig(requestConfig);
+            }
+        }
+
+        public Publisher<ByteBuffer> getPublisher() {
+            return request.getBody();
+        }
+
+        RequestContext<Void> createContextForNextAttempt(
+            Node node,
+            AuthCache authCache,
+            MonoSink<Message<HttpResponse, Publisher<ByteBuffer>>> emitter
+        ) {
+            this.httpRequest.reset();
+            return new ReactiveRequestContext(this, node, authCache, emitter);
+        }
+    }
+
     private class InternalRequest {
         private final Request request;
         private final Set<Integer> ignoreErrorCodes;
@@ -868,12 +1037,22 @@ private void setRequestConfig(HttpUriRequestBase httpRequest, RequestConfig requ
             }
         }
 
-        RequestContext createContextForNextAttempt(Node node, AuthCache authCache) {
+        RequestContext<ClassicHttpResponse> createContextForNextAttempt(Node node, AuthCache authCache) {
             this.httpRequest.reset();
-            return new RequestContext(this, node, authCache);
+            return new AsyncRequestContext(this, node, authCache);
         }
     }
 
+    private interface RequestContext<T> {
+        Node node();
+
+        AsyncRequestProducer requestProducer();
+
+        AsyncResponseConsumer<T> asyncResponseConsumer();
+
+        HttpClientContext context();
+    }
+
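The header-merging rule in setHeaders above is order-sensitive; as a sketch with hypothetical header values:

    // defaultHeaders:   Accept: application/json, X-Example: on
    // request headers:  Accept: application/yaml
    // outgoing request: Accept: application/yaml, X-Example: on
    //                   (plus Accept-Encoding: gzip when compression is enabled)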
To workaround that, wrapping the AuthCache around current HttpClientContext @@ -934,13 +1113,73 @@ public void clear() { } - private static class RequestContext { + private static class ReactiveRequestContext implements RequestContext { + private final Node node; + private final AsyncRequestProducer requestProducer; + private final AsyncResponseConsumer asyncResponseConsumer; + private final HttpClientContext context; + + ReactiveRequestContext( + InternalStreamingRequest request, + Node node, + AuthCache authCache, + MonoSink>> emitter + ) { + this.node = node; + // we stream the request body if the entity allows for it + this.requestProducer = ReactiveHttpUriRequestProducer.create(request.httpRequest, node.getHost(), request.getPublisher()); + this.asyncResponseConsumer = new ReactiveResponseConsumer(new FutureCallback>>() { + @Override + public void failed(Exception ex) { + emitter.error(ex); + } + + @Override + public void completed(Message> result) { + if (result == null) { + emitter.success(); + } else { + emitter.success(result); + } + } + + @Override + public void cancelled() { + failed(new CancellationException("Future cancelled")); + } + }); + this.context = HttpClientContext.create(); + context.setAuthCache(new WrappingAuthCache(context, authCache)); + } + + @Override + public AsyncResponseConsumer asyncResponseConsumer() { + return asyncResponseConsumer; + } + + @Override + public HttpClientContext context() { + return context; + } + + @Override + public Node node() { + return node; + } + + @Override + public AsyncRequestProducer requestProducer() { + return requestProducer; + } + } + + private static class AsyncRequestContext implements RequestContext { private final Node node; private final AsyncRequestProducer requestProducer; private final AsyncResponseConsumer asyncResponseConsumer; private final HttpClientContext context; - RequestContext(InternalRequest request, Node node, AuthCache authCache) { + AsyncRequestContext(InternalRequest request, Node node, AuthCache authCache) { this.node = node; // we stream the request body if the entity allows for it this.requestProducer = HttpUriRequestProducer.create(request.httpRequest, node.getHost()); @@ -950,6 +1189,26 @@ private static class RequestContext { this.context = HttpClientContext.create(); context.setAuthCache(new WrappingAuthCache(context, authCache)); } + + @Override + public AsyncResponseConsumer asyncResponseConsumer() { + return asyncResponseConsumer; + } + + @Override + public HttpClientContext context() { + return context; + } + + @Override + public Node node() { + return node; + } + + @Override + public AsyncRequestProducer requestProducer() { + return requestProducer; + } } private static Set getIgnoreErrorCodes(String ignoreString, String requestMethod) { diff --git a/client/rest/src/main/java/org/opensearch/client/StreamingRequest.java b/client/rest/src/main/java/org/opensearch/client/StreamingRequest.java new file mode 100644 index 0000000000000..e1767407b1353 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/StreamingRequest.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
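A simplified sketch of how the pieces above compose: a streaming call builds an InternalStreamingRequest and bridges the HttpClient callback world into Reactor through a MonoSink. The wiring below is illustrative only, not the literal implementation; names such as internalRequest, node, authCache and client are assumptions:

    // the ReactiveResponseConsumer created inside ReactiveRequestContext completes
    // this sink once the response head (and its body publisher) is available
    Mono<Message<HttpResponse, Publisher<ByteBuffer>>> responseMono = Mono.create(emitter -> {
        RequestContext<Void> context = internalRequest.createContextForNextAttempt(node, authCache, emitter);
        client.execute(context.requestProducer(), context.asyncResponseConsumer(), context.context(), null);
    });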
diff --git a/client/rest/src/main/java/org/opensearch/client/StreamingRequest.java b/client/rest/src/main/java/org/opensearch/client/StreamingRequest.java new file mode 100644 index 0000000000000..e1767407b1353 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/StreamingRequest.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client; + +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import org.reactivestreams.Publisher; + +import static java.util.Collections.unmodifiableMap; + +/** + * HTTP Streaming Request to OpenSearch. This is an experimental API. + */ +public class StreamingRequest<T> { + private final String method; + private final String endpoint; + private final Map<String, String> parameters = new HashMap<>(); + + private RequestOptions options = RequestOptions.DEFAULT; + private final Publisher<T> publisher; + + /** + * Constructor + * @param method method + * @param endpoint endpoint + * @param publisher publisher + */ + public StreamingRequest(String method, String endpoint, Publisher<T> publisher) { + this.method = method; + this.endpoint = endpoint; + this.publisher = publisher; + } + + /** + * Get endpoint + * @return endpoint + */ + public String getEndpoint() { + return endpoint; + } + + /** + * Get method + * @return method + */ + public String getMethod() { + return method; + } + + /** + * Get options + * @return options + */ + public RequestOptions getOptions() { + return options; + } + + /** + * Get parameters + * @return parameters + */ + public Map<String, String> getParameters() { + if (options.getParameters().isEmpty()) { + return unmodifiableMap(parameters); + } else { + Map<String, String> combinedParameters = new HashMap<>(parameters); + combinedParameters.putAll(options.getParameters()); + return unmodifiableMap(combinedParameters); + } + } + + /** + * Add a query string parameter. + * @param name the name of the url parameter. Must not be null. + * @param value the value of the url parameter. If {@code null} then + * the parameter is sent as {@code name} rather than {@code name=value} + * @throws IllegalArgumentException if a parameter with that name has + * already been set + */ + public void addParameter(String name, String value) { + Objects.requireNonNull(name, "url parameter name cannot be null"); + if (parameters.containsKey(name)) { + throw new IllegalArgumentException("url parameter [" + name + "] has already been set to [" + parameters.get(name) + "]"); + } else { + parameters.put(name, value); + } + } + + /** + * Add query parameters using the provided map of key value pairs. + * + * @param paramSource a map of key value pairs where the key is the url parameter. + * @throws IllegalArgumentException if a parameter with that name has already been set. + */ + public void addParameters(Map<String, String> paramSource) { + paramSource.forEach(this::addParameter); + } + + /** + * Body publisher + * @return body publisher + */ + public Publisher<T> getBody() { + return publisher; + } +}
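To make the request side concrete, a caller could assemble a streaming request like this (a sketch: the endpoint and document lines are made up for illustration; only the StreamingRequest API itself comes from this change):

    // body chunks are supplied lazily by a Reactive Streams publisher
    StreamingRequest<ByteBuffer> streamingRequest = new StreamingRequest<>(
        "POST",
        "/_bulk/stream", // hypothetical endpoint, for illustration only
        Flux.just("{\"index\":{}}\n", "{\"f\":1}\n").map(line -> ByteBuffer.wrap(line.getBytes(StandardCharsets.UTF_8)))
    );
    streamingRequest.addParameter("refresh", "true"); // query parameters work as on a plain Request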
diff --git a/client/rest/src/main/java/org/opensearch/client/StreamingResponse.java b/client/rest/src/main/java/org/opensearch/client/StreamingResponse.java new file mode 100644 index 0000000000000..87d404c115723 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/StreamingResponse.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client; + +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpResponse; +import org.apache.hc.core5.http.Message; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; + +import java.util.List; + +import org.reactivestreams.Publisher; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +/** + * HTTP Streaming Response from OpenSearch. This is an experimental API. + */ +public class StreamingResponse<T> { + private final RequestLine requestLine; + private final Mono<Message<HttpResponse, Publisher<T>>> publisher; + private volatile HttpHost host; + + /** + * Constructor + * @param requestLine request line + * @param publisher message publisher (response with a body) + */ + public StreamingResponse(RequestLine requestLine, Publisher<Message<HttpResponse, Publisher<T>>> publisher) { + this.requestLine = requestLine; + // We cache the publisher here so the body or / and HttpResponse could + // be consumed independently or/and more than once. + this.publisher = Mono.from(publisher).cache(); + } + + /** + * Set host + * @param host host + */ + public void setHost(HttpHost host) { + this.host = host; + } + + /** + * Get request line + * @return request line + */ + public RequestLine getRequestLine() { + return requestLine; + } + + /** + * Get host + * @return host + */ + public HttpHost getHost() { + return host; + } + + /** + * Get response body {@link Publisher} + * @return response body {@link Publisher} + */ + public Publisher<T> getBody() { + return publisher.flatMapMany(m -> Flux.from(m.getBody())); + } + + /** + * Returns the status line of the current response + */ + public StatusLine getStatusLine() { + return new StatusLine( + publisher.map(Message::getHead) + .onErrorResume(ResponseException.class, e -> Mono.just(e.getResponse().getHttpResponse())) + .block() + ); + } + + /** + * Returns a list of all warning headers returned in the response. + */ + public List<String> getWarnings() { + return ResponseWarningsExtractor.getWarnings( + publisher.map(Message::getHead) + .onErrorResume(ResponseException.class, e -> Mono.just(e.getResponse().getHttpResponse())) + .block() + ); + } +}
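On the response side, the cached Mono lets the head and the body be consumed independently. A minimal consumer might look like the following (illustrative; restClient and streamingRequest are assumed from the sketch above, and streamRequest is the RestClient entry point exercised in the tests further down):

    StreamingResponse<ByteBuffer> streamingResponse = restClient.streamRequest(streamingRequest);
    Flux.from(streamingResponse.getBody())
        .map(chunk -> StandardCharsets.UTF_8.decode(chunk).toString())
        .doOnNext(System.out::println) // handle each body chunk as it arrives
        .blockLast();                  // blocking here only to keep the example short
    // note: getStatusLine() blocks until the response head has been received
    assert streamingResponse.getStatusLine().getStatusCode() == 200;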
diff --git a/client/rest/src/main/java/org/opensearch/client/http/ReactiveHttpUriRequestProducer.java b/client/rest/src/main/java/org/opensearch/client/http/ReactiveHttpUriRequestProducer.java new file mode 100644 index 0000000000000..63a71e29b8b31 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/http/ReactiveHttpUriRequestProducer.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client.http; + +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.nio.AsyncEntityProducer; +import org.apache.hc.core5.http.nio.support.BasicRequestProducer; +import org.apache.hc.core5.net.URIAuthority; +import org.apache.hc.core5.reactive.ReactiveEntityProducer; +import org.apache.hc.core5.util.Args; + +import java.nio.ByteBuffer; + +import org.reactivestreams.Publisher; + +/** + * The reactive producer of the {@link HttpUriRequestBase} instances associated with a particular {@link HttpHost} + */ +public class ReactiveHttpUriRequestProducer extends BasicRequestProducer { + private final HttpUriRequestBase request; + + ReactiveHttpUriRequestProducer(final HttpUriRequestBase request, final AsyncEntityProducer entityProducer) { + super(request, entityProducer); + this.request = request; + } + + /** + * Get the produced {@link HttpUriRequestBase} instance + * @return produced {@link HttpUriRequestBase} instance + */ + public HttpUriRequestBase getRequest() { + return request; + } + + /** + * Create new request producer for {@link HttpUriRequestBase} instance and {@link HttpHost} + * @param request {@link HttpUriRequestBase} instance + * @param host {@link HttpHost} instance + * @param publisher publisher + * @return new request producer + */ + public static ReactiveHttpUriRequestProducer create( + final HttpUriRequestBase request, + final HttpHost host, + Publisher<ByteBuffer> publisher + ) { + Args.notNull(request, "Request"); + Args.notNull(host, "HttpHost"); + + // TODO: Should we copy request here instead of modifying in place? + request.setAuthority(new URIAuthority(host)); + request.setScheme(host.getSchemeName()); + + final Header contentTypeHeader = request.getFirstHeader("Content-Type"); + final ContentType contentType = (contentTypeHeader == null) ? ContentType.APPLICATION_JSON : ContentType.parse(contentTypeHeader.getValue()); + + final Header contentEncodingHeader = request.getFirstHeader("Content-Encoding"); + final String contentEncoding = (contentEncodingHeader == null) ? null : contentEncodingHeader.getValue(); + + final AsyncEntityProducer entityProducer = new ReactiveEntityProducer(publisher, -1, contentType, contentEncoding); + return new ReactiveHttpUriRequestProducer(request, entityProducer); + } + +} diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java index dd51da3a30d8c..f4f1c57cdd588 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java @@ -56,12 +56,15 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; +import reactor.core.publisher.Mono; + import static java.util.Collections.singletonList; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; @@ -418,6 +421,16 @@ public void testIsRunning() { assertFalse(restClient.isRunning()); } + public void testStreamWithUnsupportedMethod() throws Exception { + try (RestClient restClient = createRestClient()) { + final UnsupportedOperationException ex = assertThrows( + UnsupportedOperationException.class, + () -> restClient.streamRequest(new StreamingRequest<>("unsupported", randomAsciiLettersOfLength(5), Mono.empty())) + ); + assertEquals("http method not supported: unsupported", ex.getMessage()); + } + } + private static void assertNodes(NodeTuple<List<Node>> nodeTuple, AtomicInteger lastNodeIndex, int runs) throws IOException { int distance = lastNodeIndex.get() % nodeTuple.nodes.size(); /* diff --git a/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 b/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 deleted file mode 100644 index 3555fe22f8e12..0000000000000 --- a/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0c900514d3446d9ce5d9dbd90c21192048125440 \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 b/client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..43e233e72001a --- /dev/null +++ b/client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 @@ -0,0 +1 @@ +5d753a99d299756998a08c488f2efdf9cf26198e \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 b/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 deleted file mode 100644 index b641256c7d4a4..0000000000000 --- a/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6da28f5aa6c2b129ef49632e041a5203ce7507b2 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 b/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..ca97e8612ea39 --- /dev/null +++ b/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 @@ -0,0 +1 @@ +dab1e18842971a45ca8942491ce005ab86a028d7 \ No newline at end of file diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 621620eef9d71..25af649bb4aed 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.9.1" + id "com.netflix.nebula.ospackage-base" version "11.10.0"
} void addProcessFilesTask(String type, boolean jdk) { diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 3083ad4375460..784cdc457a1a9 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -37,8 +37,8 @@ base { dependencies { compileOnly project(":server") compileOnly project(":libs:opensearch-cli") - api "org.bouncycastle:bcpg-fips:1.0.7.1" - api "org.bouncycastle:bc-fips:1.0.2.5" + api "org.bouncycastle:bcpg-fips:2.0.9" + api "org.bouncycastle:bc-fips:2.0.0" testImplementation project(":test:framework") testImplementation 'com.google.jimfs:jimfs:1.3.0' testRuntimeOnly("com.google.guava:guava:${versions.guava}") { @@ -58,33 +58,6 @@ test { jvmArgs += [ "-Djava.security.egd=file:/dev/urandom" ] } -/* - * these two classes intentionally use the following JDK internal APIs in order to offer the necessary - * functionality - * - * sun.security.internal.spec.TlsKeyMaterialParameterSpec - * sun.security.internal.spec.TlsKeyMaterialSpec - * sun.security.internal.spec.TlsMasterSecretParameterSpec - * sun.security.internal.spec.TlsPrfParameterSpec - * sun.security.internal.spec.TlsRsaPremasterSecretParameterSpec - * sun.security.provider.SecureRandom - * - */ -thirdPartyAudit.ignoreViolations( - 'org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider$CoreSecureRandom', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$BaseTLSKeyGeneratorSpi', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSKeyMaterialGenerator', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSKeyMaterialGenerator$2', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSMasterSecretGenerator', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSMasterSecretGenerator$2', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSPRFKeyGenerator', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSRsaPreMasterSecretGenerator', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSRsaPreMasterSecretGenerator$2', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSExtendedMasterSecretGenerator', - 'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSExtendedMasterSecretGenerator$2' -) - thirdPartyAudit.ignoreMissingClasses( 'org.brotli.dec.BrotliInputStream', 'org.objectweb.asm.AnnotationVisitor', diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.5.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.5.jar.sha1 deleted file mode 100644 index 1b44c77dd4ee1..0000000000000 --- a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -704e65f7e4fe679e5ab2aa8a840f27f8ced4c522 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-2.0.0.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-2.0.0.jar.sha1 new file mode 100644 index 0000000000000..79f0e3e9930bb --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bc-fips-2.0.0.jar.sha1 @@ -0,0 +1 @@ +ee9ac432cf08f9a9ebee35d7cf8a45f94959a7ab \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.7.1.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.7.1.jar.sha1 deleted file mode 100644 index 44cebc7c92d87..0000000000000 --- a/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.7.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e1952428655ea822066f86df2e3ecda8fa0ba2b \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.9.jar.sha1 
b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.9.jar.sha1 new file mode 100644 index 0000000000000..20cdbf6dc8aa8 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.9.jar.sha1 @@ -0,0 +1 @@ +f69719ef8dbf34d5f906ce480496446b2fd2ae27 \ No newline at end of file diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 2c3521197d7c4..a4b76b9530d66 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index c7f182843385d..39a291b258efb 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70 +distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703 diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index b647a92d6708a..28cb989185ff7 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -107,6 +107,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_15_0 = new Version(2150099, org.apache.lucene.util.Version.LUCENE_9_10_0); public static final Version V_2_15_1 = new Version(2150199, org.apache.lucene.util.Version.LUCENE_9_10_0); public static final Version V_2_16_0 = new Version(2160099, org.apache.lucene.util.Version.LUCENE_9_11_1); + public static final Version V_2_16_1 = new Version(2160199, org.apache.lucene.util.Version.LUCENE_9_11_1); + public static final Version V_2_17_0 = new Version(2170099, org.apache.lucene.util.Version.LUCENE_9_11_1); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_12_0); public static final Version CURRENT = V_3_0_0; diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractXContentParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractXContentParser.java index 4efaacecd0e67..4605aa684db1c 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractXContentParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractXContentParser.java @@ -375,7 +375,7 @@ private static void skipToListStart(XContentParser parser) throws IOException { } } - // read a list without bounds checks, assuming the the current parser is always on an array start + // read a list without bounds checks, assuming the current parser is always on an array start private static List readListUnsafe(XContentParser parser, Supplier> mapFactory) throws IOException { assert parser.currentToken() == Token.START_ARRAY; ArrayList list = new ArrayList<>(); diff --git a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java new file mode 100644 index 0000000000000..2f33eb513c165 --- /dev/null +++ b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 
+ * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.secure_sm; + +import java.security.BasicPermission; + +/** + * Permission to utilize methods in the ThreadContext class that are normally not accessible + * + * @see ThreadGroup + * @see SecureSM + */ +public final class ThreadContextPermission extends BasicPermission { + + /** + * Creates a new ThreadContextPermission object. + * + * @param name target name + */ + public ThreadContextPermission(String name) { + super(name); + } + + /** + * Creates a new ThreadContextPermission object. + * This constructor exists for use by the {@code Policy} object to instantiate new Permission objects. + * + * @param name target name + * @param actions ignored + */ + public ThreadContextPermission(String name, String actions) { + super(name, actions); + } +} diff --git a/libs/task-commons/build.gradle b/libs/task-commons/build.gradle new file mode 100644 index 0000000000000..dde3acd8effcf --- /dev/null +++ b/libs/task-commons/build.gradle @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +dependencies { + api project(':libs:opensearch-common') + + testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + testImplementation "junit:junit:${versions.junit}" + testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}" + testImplementation(project(":test:framework")) { + exclude group: 'org.opensearch', module: 'opensearch-task-commons' + } +} + +tasks.named('forbiddenApisMain').configure { + replaceSignatureFiles 'jdk-signatures' +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskListRequest.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskListRequest.java new file mode 100644 index 0000000000000..625e15dfb3b6d --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskListRequest.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.clients; + +import org.opensearch.task.commons.task.TaskStatus; +import org.opensearch.task.commons.task.TaskType; +import org.opensearch.task.commons.worker.WorkerNode; + +/** + * Request object for listing tasks + */ +public class TaskListRequest { + + /** + * Filters listTasks response by specific task statuses + */ + private TaskStatus[] taskStatus; + + /** + * Filter listTasks response by specific task types + */ + private TaskType[] taskTypes; + + /** + * Filter listTasks response by specific worker node + */ + private WorkerNode workerNodes; + + /** + * Depicts the start page number for the list call. + * + * @see TaskManagerClient#listTasks(TaskListRequest) + */ + private int startPageNumber; + + /** + * Depicts the page size for the list call.
+ * + * @see TaskManagerClient#listTasks(TaskListRequest) + */ + private int pageSize; + + /** + * Default constructor + */ + public TaskListRequest() {} + + /** + * Update task types to filter on in the request + * @param taskTypes TaskType[] + * @return TaskListRequest + */ + public TaskListRequest taskType(TaskType... taskTypes) { + this.taskTypes = taskTypes; + return this; + } + + /** + * Update task statuses to filter on in the request + * @param taskStatus TaskStatus[] + * @return TaskListRequest + */ + public TaskListRequest taskStatus(TaskStatus... taskStatus) { + this.taskStatus = taskStatus; + return this; + } + + /** + * Update worker node to filter on in the request + * @param workerNode WorkerNode + * @return TaskListRequest + */ + public TaskListRequest workerNode(WorkerNode workerNode) { + this.workerNodes = workerNode; + return this; + } + + /** + * Update page number to start with when fetching the list of tasks + * @param startPageNumber startPageNumber + * @return TaskListRequest + */ + public TaskListRequest startPageNumber(int startPageNumber) { + this.startPageNumber = startPageNumber; + return this; + } + + /** + * Update page size for the list tasks response + * @param pageSize int + * @return TaskListRequest + */ + public TaskListRequest pageSize(int pageSize) { + this.pageSize = pageSize; + return this; + } +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskManagerClient.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskManagerClient.java new file mode 100644 index 0000000000000..23ad8eabdc365 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskManagerClient.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.clients; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.task.commons.task.Task; +import org.opensearch.task.commons.task.TaskId; +import org.opensearch.task.commons.worker.WorkerNode; + +import java.util.List; + +/** + * Client used to interact with Task Store/Queue. + * + * TODO: TaskManager can be something not running an opensearch process. + * We need to come up with a way to allow this interface to be used both within and outside an opensearch process + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface TaskManagerClient { + + /** + * Get task from TaskStore/Queue + * + * @param taskId TaskId of the task to be retrieved + * @return Task corresponding to TaskId + */ + Task getTask(TaskId taskId); + + /** + * Update task in TaskStore/Queue + * + * @param task Task to be updated + */ + void updateTask(Task task); + + /** + * Mark task as cancelled. + * Ongoing Tasks can be cancelled as well if the corresponding worker supports cancellation + * + * @param taskId TaskId of the task to be cancelled + */ + void cancelTask(TaskId taskId); + + /** + * List all tasks applying all the filters present in listTaskRequest + * + * @param taskListRequest TaskListRequest + * @return list of all the tasks matching the filters in listTaskRequest + */ + List<Task> listTasks(TaskListRequest taskListRequest); + + /** + * Assign Task to a particular WorkerNode. This ensures no two worker nodes work on the same task. + * This API can be used in both pull and push models of task assignment.
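+ * For example (illustrative only; taskManagerClient, task, taskWorker and localNode are assumed names), a pull-model worker might guard execution with: + * {@code if (taskManagerClient.assignTask(task.getTaskId(), localNode)) { taskWorker.executeTask(task); } }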
+ * + * @param taskId TaskId of the task to be assigned + * @param node WorkerNode task is being assigned to + * @return true if task is assigned successfully, false otherwise + */ + boolean assignTask(TaskId taskId, WorkerNode node); +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskProducerClient.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskProducerClient.java new file mode 100644 index 0000000000000..18f3421f9aa97 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskProducerClient.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.clients; + +import org.opensearch.task.commons.task.Task; + +/** + * Producer interface used to submit new tasks for execution on worker nodes. + */ +public interface TaskProducerClient { + + /** + * Submit a new task to TaskStore/Queue + * + * @param task Task to be submitted for execution on offline nodes + */ + void submitTask(Task task); +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskWorkerClient.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskWorkerClient.java new file mode 100644 index 0000000000000..59abe189434dd --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/TaskWorkerClient.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.clients; + +import org.opensearch.task.commons.task.Task; +import org.opensearch.task.commons.task.TaskId; + +import java.util.List; + +/** + * Consumer interface used to find new tasks assigned to a {@code WorkerNode} for execution. + */ +public interface TaskWorkerClient { + + /** + * List all tasks assigned to a WorkerNode. + * Useful when the implementation uses a separate store for Task assignments to Worker nodes + * + * @param taskListRequest TaskListRequest + * @return list of all tasks assigned to a WorkerNode + */ + List<Task> getAssignedTasks(TaskListRequest taskListRequest); + + /** + * Sends task heartbeat to Task Store/Queue + * + * @param taskId TaskId of Task to send heartbeat for + * @param timestamp timestamp of heartbeat to be recorded in TaskStore/Queue + */ + void sendTaskHeartbeat(TaskId taskId, long timestamp); + +}
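Taken together, the producer, manager and worker clients suggest a polling loop on the worker side along these lines (a sketch under assumed names; workerClient and taskWorker are not part of this change, and scheduling and error handling are omitted):

    // fetch work assigned to this node, refresh the heartbeat, then execute
    TaskListRequest assigned = new TaskListRequest().taskStatus(TaskStatus.ASSIGNED).pageSize(10);
    for (Task task : workerClient.getAssignedTasks(assigned)) {
        workerClient.sendTaskHeartbeat(task.getTaskId(), System.currentTimeMillis());
        taskWorker.executeTask(task);
    }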
diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/package-info.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/package-info.java new file mode 100644 index 0000000000000..1329f3888248c --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/clients/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains task client related classes + */ +package org.opensearch.task.commons.clients; diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/package-info.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/package-info.java new file mode 100644 index 0000000000000..4cb773ace62ce --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains offline tasks related classes + */ +package org.opensearch.task.commons; diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/Task.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/Task.java new file mode 100644 index 0000000000000..7ad567b57bd42 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/Task.java @@ -0,0 +1,361 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.task.commons.worker.WorkerNode; + +/** + * A Background Task to be run on Offline Node. + */ +@ExperimentalApi +public class Task { + + /** + * Task identifier used to uniquely identify a Task + */ + private final TaskId taskId; + + /** + * Depicts latest state of the Task + */ + private final TaskStatus taskStatus; + + /** + * Various params used for Task execution + */ + private final TaskParams params; + + /** + * Type/Category of the Task + */ + private final TaskType taskType; + + /** + * Worker Node on which the Task is to be executed + */ + private final WorkerNode assignedNode; + + /** + * Timestamp at which the Task was created + */ + private final long createdAt; + + /** + * Timestamp at which the Task was assigned to a worker + */ + private final long assignedAt; + + /** + * Timestamp at which the Task was started execution on worker + */ + private final long startedAt; + + /** + * Timestamp at which the Task was either completed/failed/cancelled + */ + private final long completedAt; + + /** + * Timestamp at which last heartbeat was sent by the worker + */ + private final long lastHeartbeatAt; + + /** + * Constructor for Task + * + * @param taskId Task identifier + * @param taskStatus Task status + * @param params Task Params + * @param taskType Task Type + * @param createdAt Timestamp at which the Task was created + * @param assignedAt Timestamp at which the Task was assigned to a worker + * @param startedAt Timestamp at which the Task was started execution on worker + * @param completedAt Timestamp at which the Task was either completed/failed/cancelled + * @param lastHeartbeatAt Timestamp at which last heartbeat was sent by the worker + * @param assignedNode Worker Node on which the Task is to be executed + */ + public Task( + TaskId taskId, + TaskStatus taskStatus, + TaskParams params, + TaskType taskType, + long createdAt, + @Nullable long assignedAt, + @Nullable long startedAt, + @Nullable long completedAt, + @Nullable long lastHeartbeatAt, + @Nullable WorkerNode assignedNode + ) { + this.taskId = taskId; + this.taskStatus = taskStatus; + this.params = params; + this.taskType = taskType; + this.createdAt = createdAt; + this.assignedAt = assignedAt; + this.startedAt = startedAt; + this.completedAt = completedAt; + this.lastHeartbeatAt = lastHeartbeatAt; + this.assignedNode = assignedNode; + } + + /** + * Get TaskId + * @return TaskId + */ + public TaskId getTaskId() { + return taskId; + } + + /** + * Get TaskStatus + * @return TaskStatus + */ + public TaskStatus getTaskStatus() { + return taskStatus; + } + + /** + * Get TaskParams + * @return TaskParams + */ + public TaskParams getParams() { + return params; + } + + /** + * Get TaskType + * @return TaskType + */ + public TaskType getTaskType() { + return taskType; + } + + /** + * Get Task Creation Time + * @return createdAt + */ + public long getCreatedAt() { + return createdAt; + } + + /** + * Get Task Assignment Time + * @return assignedAt + */ + public long getAssignedAt() { + return assignedAt; + } + + /** + * Get Task Start Time + * @return startedAt + */ + public long getStartedAt() { + return startedAt; + } + + /** + * Get Task Completion Time + * @return completedAt + */ + public long getCompletedAt() { + return completedAt; + } + + /** + * Get Last Heartbeat Time + * @return lastHeartbeatAt + */ + public long getLastHeartbeatAt() { + return lastHeartbeatAt; + } + + /** + * Get Task Assigned Node + * @return assignedNode + */ + public WorkerNode getAssignedNode() { + return assignedNode; + } + + /** + * Builder class for Task. + */ + public static class Builder { + /** + * Task identifier used to uniquely identify a Task + */ + private final TaskId taskId; + + /** + * Depicts latest state of the Task + */ + private TaskStatus taskStatus; + + /** + * Various params used for Task execution + */ + private final TaskParams params; + + /** + * Type/Category of the Task + */ + private final TaskType taskType; + + /** + * Worker Node on which the Task is to be executed + */ + private WorkerNode assignedNode; + + /** + * Timestamp at which the Task was created + */ + private final long createdAt; + + /** + * Timestamp at which the Task was assigned to a worker + */ + private long assignedAt; + + /** + * Timestamp at which the Task was started execution on worker + */ + private long startedAt; + + /** + * Timestamp at which the Task was either completed/failed/cancelled + */ + private long completedAt; + + /** + * Timestamp at which last heartbeat was sent by the worker + */ + private long lastHeartbeatAt; + + /** + * Constructor for Task Builder + * + * @param taskId Task identifier + * @param taskStatus Task status + * @param params Task Params + * @param taskType Task Type + * @param createdAt Task Creation Time + */ + private Builder(TaskId taskId, TaskStatus taskStatus, TaskParams params, TaskType taskType, long createdAt) { + this.taskId = taskId; + this.taskStatus = taskStatus; + this.params = params; + this.taskType = taskType; + this.createdAt = createdAt; + } + + /** + * Build Builder from Task + * @param task Task to build from + * @return Task.Builder + */ + public static Builder builder(Task task) { + Builder builder = new Builder( + task.getTaskId(), + task.getTaskStatus(), + task.getParams(), + task.getTaskType(), + task.getCreatedAt() + ); + builder.assignedAt(task.getAssignedAt()); + builder.startedAt(task.getStartedAt()); + builder.completedAt(task.getCompletedAt()); + builder.lastHeartbeatAt(task.getLastHeartbeatAt()); + builder.assignedNode(task.getAssignedNode()); + return builder; + } + + /** + * Build Builder from various Task attributes + * @param taskId
Task identifier + * @param taskStatus TaskStatus + * @param params TaskParams + * @param taskType TaskType + * @param createdAt Task Creation Time + * @return Task.Builder + */ + public static Builder builder(TaskId taskId, TaskStatus taskStatus, TaskParams params, TaskType taskType, long createdAt) { + return new Builder(taskId, taskStatus, params, taskType, createdAt); + } + + /** + * Set Task Assignment Time + * @param assignedAt Timestamp at which the Task was assigned to a worker + */ + public void assignedAt(long assignedAt) { + this.assignedAt = assignedAt; + } + + /** + * Set Task Start Time + * @param startedAt Timestamp at which the Task was started execution on worker + */ + public void startedAt(long startedAt) { + this.startedAt = startedAt; + } + + /** + * Set Task Completion Time + * @param completedAt Timestamp at which the Task was either completed/failed/cancelled + */ + public void completedAt(long completedAt) { + this.completedAt = completedAt; + } + + /** + * Set Task Last Heartbeat Time for the task + * @param lastHeartbeatAt Timestamp at which last heartbeat was sent by the worker + */ + public void lastHeartbeatAt(long lastHeartbeatAt) { + this.lastHeartbeatAt = lastHeartbeatAt; + } + + /** + * Update the Task Status + * @param taskStatus {@link TaskStatus} - current status of the Task + */ + public void taskStatus(TaskStatus taskStatus) { + this.taskStatus = taskStatus; + } + + /** + * Set Task Assigned Node + * @param node Worker Node on which the Task is to be executed + */ + public void assignedNode(WorkerNode node) { + this.assignedNode = node; + } + + /** + * Build Task from Builder + * @return Task + */ + public Task build() { + return new Task( + taskId, + taskStatus, + params, + taskType, + createdAt, + assignedAt, + startedAt, + completedAt, + lastHeartbeatAt, + assignedNode + ); + } + } +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskId.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskId.java new file mode 100644 index 0000000000000..7fb7e1536dc17 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskId.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.task.commons.task; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Objects; + +/** + * Class encapsulating Task identifier + */ +@ExperimentalApi +public class TaskId { + + /** + * Identifier of the Task + */ + private final String id; + + /** + * Constructor to initialize TaskId + * @param id String value of Task id + */ + public TaskId(String id) { + this.id = id; + } + + /** + * Get id value + * @return id + */ + public String getValue() { + return id; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()) { + return false; + } + TaskId other = (TaskId) obj; + return this.id.equals(other.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskParams.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskParams.java new file mode 100644 index 0000000000000..ac6126a02cf24 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskParams.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Base class for all TaskParams implementations of various TaskTypes + */ +@ExperimentalApi +public abstract class TaskParams { + + /** + * Default constructor + */ + public TaskParams() {} +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskStatus.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskStatus.java new file mode 100644 index 0000000000000..e44e5c7fee620 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskStatus.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Task status enum + */ +@ExperimentalApi +public enum TaskStatus { + + /** + * TaskStatus of a Task which is not yet assigned to or picked up by a worker + */ + UNASSIGNED, + + /** + * TaskStatus of a Task which is assigned to or picked up by a worker but hasn't started execution yet. + * This status confirms that a worker will execute this task and no other worker should pick it up.
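+ * The typical progression is UNASSIGNED -> ASSIGNED -> ACTIVE -> one of SUCCESS, FAILED or CANCELLED.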
+ */ + ASSIGNED, + + /** + * TaskStatus of an in progress Task + */ + ACTIVE, + + /** + * TaskStatus of a finished Task + */ + SUCCESS, + + /** + * TaskStatus of a Task which failed in 1 or more attempts + */ + FAILED, + + /** + * TaskStatus of a cancelled Task + */ + CANCELLED +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskType.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskType.java new file mode 100644 index 0000000000000..ef267a86e3da4 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/TaskType.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Enum for task type + */ +@ExperimentalApi +public enum TaskType { + /** + * For all segment merge related tasks + */ + MERGE, + + /** + * For all snapshot related tasks + */ + SNAPSHOT +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/task/package-info.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/package-info.java new file mode 100644 index 0000000000000..e99e4079cc7af --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/task/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains tasks related classes + */ +package org.opensearch.task.commons.task; diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/TaskWorker.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/TaskWorker.java new file mode 100644 index 0000000000000..6d9fd1fed23e4 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/TaskWorker.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.worker; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.task.commons.task.Task; + +/** + * Task Worker that executes the Task + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface TaskWorker { + + /** + * Execute the Task + * + * @param task Task to be executed + */ + void executeTask(Task task); + +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/WorkerNode.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/WorkerNode.java new file mode 100644 index 0000000000000..1b9426d472184 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/WorkerNode.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.worker; + +import java.util.Objects; + +/** + * Represents a worker node in the fleet + */ +public class WorkerNode { + /** + * The unique identifier of the worker node. 
+ */ + private final String id; + + /** + * The name of the worker node. + */ + private final String name; + + /** + * The IP address of the worker node. + */ + private final String ip; + + /** + * Creates a new worker node with the given ID, name, and IP address. + * + * @param id The unique identifier of the worker node. + * @param name The name of the worker node. + * @param ip The IP address of the worker node. + */ + private WorkerNode(String id, String name, String ip) { + this.id = id; + this.name = name; + this.ip = ip; + } + + /** + * Creates a new worker node with the given ID, name, and IP address. + * + * @param id The unique identifier of the worker node. + * @param name The name of the worker node. + * @param ip The IP address of the worker node. + * @return The created worker node. + */ + public static WorkerNode createWorkerNode(String id, String name, String ip) { + return new WorkerNode(id, name, ip); + } + + /** + * Returns the unique identifier of the worker node. + * + * @return The ID of the worker node. + */ + public String getId() { + return id; + } + + /** + * Returns the name of the worker node. + * + * @return The name of the worker node. + */ + public String getName() { + return name; + } + + /** + * Returns the IP address of the worker node. + * + * @return The IP address of the worker node. + */ + public String getIp() { + return ip; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + WorkerNode that = (WorkerNode) o; + return Objects.equals(id, that.id) && Objects.equals(name, that.name) && Objects.equals(ip, that.ip); + } + + @Override + public int hashCode() { + return Objects.hash(id, name, ip); + } +} diff --git a/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/package-info.java b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/package-info.java new file mode 100644 index 0000000000000..d74fa30e8b661 --- /dev/null +++ b/libs/task-commons/src/main/java/org/opensearch/task/commons/worker/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains task worker related classes + */ +package org.opensearch.task.commons.worker; diff --git a/libs/task-commons/src/test/java/org/opensearch/task/commons/mocks/MockTaskParams.java b/libs/task-commons/src/test/java/org/opensearch/task/commons/mocks/MockTaskParams.java new file mode 100644 index 0000000000000..21d2a3dd725b6 --- /dev/null +++ b/libs/task-commons/src/test/java/org/opensearch/task/commons/mocks/MockTaskParams.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.task.commons.mocks; + +import org.opensearch.task.commons.task.TaskParams; + +public class MockTaskParams extends TaskParams { + + private final String value; + + public MockTaskParams(String mockValue) { + super(); + value = mockValue; + } + + public String getValue() { + return value; + } +} diff --git a/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskIdTests.java b/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskIdTests.java new file mode 100644 index 0000000000000..e58382e9fc056 --- /dev/null +++ b/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskIdTests.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.task.commons.task; + +import org.opensearch.test.OpenSearchTestCase; + +/** + * Tests for {@link TaskId} + */ +public class TaskIdTests extends OpenSearchTestCase { + + public void testConstructorAndGetValue() { + TaskId taskId = new TaskId("123"); + assertEquals("123", taskId.getValue()); + } + + public void testEqualsWithSameId() { + TaskId taskId1 = new TaskId("456"); + TaskId taskId2 = new TaskId("456"); + assertEquals(taskId1, taskId2); + } + + public void testEqualsWithDifferentId() { + TaskId taskId1 = new TaskId("789"); + TaskId taskId2 = new TaskId("987"); + assertNotEquals(taskId1, taskId2); + } + + public void testEqualsWithNull() { + TaskId taskId = new TaskId("abc"); + assertNotEquals(null, taskId); + } + + public void testEqualsWithDifferentClass() { + TaskId taskId = new TaskId("def"); + assertNotEquals(taskId, new Object()); + } + + public void testHashCode() { + TaskId taskId1 = new TaskId("456"); + TaskId taskId2 = new TaskId("456"); + assertEquals(taskId1.hashCode(), taskId2.hashCode()); + + TaskId taskId3 = new TaskId("4567"); + assertNotEquals(taskId1.hashCode(), taskId3.hashCode()); + } +} diff --git a/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskTests.java b/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskTests.java new file mode 100644 index 0000000000000..37c5f543daa02 --- /dev/null +++ b/libs/task-commons/src/test/java/org/opensearch/task/commons/task/TaskTests.java @@ -0,0 +1,147 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.task.commons.task; + +import org.opensearch.task.commons.mocks.MockTaskParams; +import org.opensearch.task.commons.worker.WorkerNode; +import org.opensearch.test.OpenSearchTestCase; + +/** + * Test for {@link Task} + */ +public class TaskTests extends OpenSearchTestCase { + + public void testTaskConstructorAndGetters() { + TaskId taskId = new TaskId("123"); + TaskStatus taskStatus = TaskStatus.UNASSIGNED; + TaskParams params = new MockTaskParams("mock"); + TaskType taskType = TaskType.MERGE; + long createdAt = System.currentTimeMillis(); + long assignedAt = createdAt + 1000; + long startedAt = createdAt + 2000; + long completedAt = createdAt + 3000; + long lastHeartbeatAt = createdAt + 2500; + WorkerNode assignedNode = WorkerNode.createWorkerNode("node1", "nodeip", "nodename"); + + Task task = new Task( + taskId, + taskStatus, + params, + taskType, + createdAt, + assignedAt, + startedAt, + completedAt, + lastHeartbeatAt, + assignedNode + ); + + assertEquals(taskId, task.getTaskId()); + assertEquals(taskStatus, task.getTaskStatus()); + assertEquals(params, task.getParams()); + assertEquals(taskType, task.getTaskType()); + assertEquals(createdAt, task.getCreatedAt()); + assertEquals(assignedAt, task.getAssignedAt()); + assertEquals(startedAt, task.getStartedAt()); + assertEquals(completedAt, task.getCompletedAt()); + assertEquals(lastHeartbeatAt, task.getLastHeartbeatAt()); + assertEquals(assignedNode, task.getAssignedNode()); + } + + public void testBuilderFromTask() { + TaskId taskId = new TaskId("123"); + TaskStatus taskStatus = TaskStatus.UNASSIGNED; + TaskParams params = new MockTaskParams("mock"); + TaskType taskType = TaskType.MERGE; + long createdAt = System.currentTimeMillis(); + long assignedAt = createdAt + 1000; + long startedAt = createdAt + 2000; + long completedAt = createdAt + 3000; + long lastHeartbeatAt = createdAt + 2500; + WorkerNode assignedNode = WorkerNode.createWorkerNode("node1", "nodeip", "nodename"); + + Task originalTask = new Task( + taskId, + taskStatus, + params, + taskType, + createdAt, + assignedAt, + startedAt, + completedAt, + lastHeartbeatAt, + assignedNode + ); + + Task.Builder builder = Task.Builder.builder(originalTask); + Task newTask = builder.build(); + + assertEquals(originalTask.getTaskId(), newTask.getTaskId()); + assertEquals(originalTask.getTaskStatus(), newTask.getTaskStatus()); + assertEquals(originalTask.getParams(), newTask.getParams()); + assertEquals(originalTask.getTaskType(), newTask.getTaskType()); + assertEquals(originalTask.getCreatedAt(), newTask.getCreatedAt()); + assertEquals(originalTask.getAssignedAt(), newTask.getAssignedAt()); + assertEquals(originalTask.getStartedAt(), newTask.getStartedAt()); + assertEquals(originalTask.getCompletedAt(), newTask.getCompletedAt()); + assertEquals(originalTask.getLastHeartbeatAt(), newTask.getLastHeartbeatAt()); + assertEquals(originalTask.getAssignedNode(), newTask.getAssignedNode()); + } + + public void testBuilderFromAttributes() { + TaskId taskId = new TaskId("123"); + TaskStatus taskStatus = TaskStatus.UNASSIGNED; + TaskParams params = new MockTaskParams("mock"); + TaskType taskType = TaskType.MERGE; + long createdAt = System.currentTimeMillis(); + + Task.Builder builder = Task.Builder.builder(taskId, taskStatus, params, taskType, createdAt); + builder.assignedAt(createdAt + 1000); + builder.startedAt(createdAt + 2000); + builder.completedAt(createdAt + 3000); + builder.lastHeartbeatAt(createdAt + 2500); + 
builder.assignedNode(WorkerNode.createWorkerNode("node1", "nodeip", "nodename")); + builder.taskStatus(TaskStatus.ACTIVE); + + Task task = builder.build(); + + assertEquals(taskId, task.getTaskId()); + assertEquals(TaskStatus.ACTIVE, task.getTaskStatus()); + assertEquals(params, task.getParams()); + assertEquals(taskType, task.getTaskType()); + assertEquals(createdAt, task.getCreatedAt()); + assertEquals(createdAt + 1000, task.getAssignedAt()); + assertEquals(createdAt + 2000, task.getStartedAt()); + assertEquals(createdAt + 3000, task.getCompletedAt()); + assertEquals(createdAt + 2500, task.getLastHeartbeatAt()); + assertEquals(WorkerNode.createWorkerNode("node1", "nodeip", "nodename"), task.getAssignedNode()); + } + + public void testBuilderWithNullOptionalFields() { + TaskId taskId = new TaskId("123"); + TaskStatus taskStatus = TaskStatus.UNASSIGNED; + TaskParams params = new MockTaskParams("mock"); + TaskType taskType = TaskType.MERGE; + long createdAt = System.currentTimeMillis(); + + Task.Builder builder = Task.Builder.builder(taskId, taskStatus, params, taskType, createdAt); + Task task = builder.build(); + + assertEquals(taskId, task.getTaskId()); + assertEquals(taskStatus, task.getTaskStatus()); + assertEquals(params, task.getParams()); + assertEquals(taskType, task.getTaskType()); + assertEquals(createdAt, task.getCreatedAt()); + assertEquals(0, task.getAssignedAt()); + assertEquals(0, task.getStartedAt()); + assertEquals(0, task.getCompletedAt()); + assertEquals(0, task.getLastHeartbeatAt()); + assertNull(task.getAssignedNode()); + } +} diff --git a/libs/task-commons/src/test/java/org/opensearch/task/commons/worker/WorkerNodeTests.java b/libs/task-commons/src/test/java/org/opensearch/task/commons/worker/WorkerNodeTests.java new file mode 100644 index 0000000000000..07570bf947a27 --- /dev/null +++ b/libs/task-commons/src/test/java/org/opensearch/task/commons/worker/WorkerNodeTests.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
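The three TaskTests cases above document both Task.Builder entry points: builder(Task) copies every field of an existing task, while builder(taskId, status, params, type, createdAt) starts from the required attributes and leaves the rest optional. A condensed usage sketch under those assumptions (names taken from the tests; not a complete, compilable test):

```java
// Surrounding imports and wiring assumed; API names are those exercised above.
long now = System.currentTimeMillis();

Task.Builder builder = Task.Builder.builder(
    new TaskId("123"), TaskStatus.UNASSIGNED, new MockTaskParams("mock"), TaskType.MERGE, now
);
builder.assignedAt(now + 1000);        // optional: unset timestamps stay 0
builder.taskStatus(TaskStatus.ACTIVE); // optional: status can be advanced before build()
Task task = builder.build();

Task copy = Task.Builder.builder(task).build(); // field-for-field copy of an existing task
```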
+ */ + +package org.opensearch.task.commons.worker; + +import org.opensearch.test.OpenSearchTestCase; + +/** + * Tests for {@link WorkerNode} + */ +public class WorkerNodeTests extends OpenSearchTestCase { + + public void testCreateWorkerNode() { + WorkerNode worker = WorkerNode.createWorkerNode("1", "Worker1", "192.168.1.1"); + assertNotNull(worker); + assertEquals("1", worker.getId()); + assertEquals("Worker1", worker.getName()); + assertEquals("192.168.1.1", worker.getIp()); + } + + public void testEquality() { + WorkerNode worker1 = WorkerNode.createWorkerNode("5", "Worker5", "192.168.1.5"); + WorkerNode worker2 = WorkerNode.createWorkerNode("5", "Worker5", "192.168.1.5"); + WorkerNode worker3 = WorkerNode.createWorkerNode("6", "Worker6", "192.168.1.6"); + + assertEquals(worker1, worker2); + assertNotEquals(worker1, worker3); + assertNotEquals(worker2, worker3); + } + + public void testHashCode() { + WorkerNode worker1 = WorkerNode.createWorkerNode("7", "Worker7", "192.168.1.7"); + WorkerNode worker2 = WorkerNode.createWorkerNode("7", "Worker7", "192.168.1.7"); + + assertEquals(worker1.hashCode(), worker2.hashCode()); + } + + public void testNotEqualToNull() { + WorkerNode worker = WorkerNode.createWorkerNode("8", "Worker8", "192.168.1.8"); + assertNotEquals(null, worker); + } + + public void testNotEqualToDifferentClass() { + WorkerNode worker = WorkerNode.createWorkerNode("9", "Worker9", "192.168.1.9"); + assertNotEquals(worker, new Object()); + } +} diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java index 8d29a347caeb8..181ebe5500ee5 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java @@ -54,11 +54,16 @@ */ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundWordTokenFilterFactory { + private final boolean noSubMatches; + private final boolean noOverlappingMatches; private final HyphenationTree hyphenationTree; HyphenationCompoundWordTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, env, name, settings); + noSubMatches = settings.getAsBoolean("no_sub_matches", false); + noOverlappingMatches = settings.getAsBoolean("no_overlapping_matches", false); + String hyphenationPatternsPath = settings.get("hyphenation_patterns_path", null); if (hyphenationPatternsPath == null) { throw new IllegalArgumentException("hyphenation_patterns_path is a required setting."); @@ -85,7 +90,9 @@ public TokenStream create(TokenStream tokenStream) { minWordSize, minSubwordSize, maxSubwordSize, - onlyLongestMatch + onlyLongestMatch, + noSubMatches, + noOverlappingMatches ); } } diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MinHashTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MinHashTokenFilterFactory.java index e76354ae3a765..40655b84794d5 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MinHashTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MinHashTokenFilterFactory.java @@ -65,10 +65,10 @@ private Map convertSettings(Settings settings) { if 
(settings.hasValue("hash_count")) { settingMap.put("hashCount", settings.get("hash_count")); } - if (settings.hasValue("bucketCount")) { + if (settings.hasValue("bucket_count")) { settingMap.put("bucketCount", settings.get("bucket_count")); } - if (settings.hasValue("hashSetSize")) { + if (settings.hasValue("hash_set_size")) { settingMap.put("hashSetSize", settings.get("hash_set_size")); } if (settings.hasValue("with_rotation")) { diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java index a681d9a104ecf..bfe30da50c055 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java @@ -50,8 +50,12 @@ import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.MatcherAssert; +import org.junit.Before; import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -63,17 +67,27 @@ import static org.hamcrest.Matchers.instanceOf; public class CompoundAnalysisTests extends OpenSearchTestCase { + + Settings[] settingsArr; + + @Before + public void initialize() throws IOException { + final Path home = createTempDir(); + copyHyphenationPatternsFile(home); + this.settingsArr = new Settings[] { getJsonSettings(home), getYamlSettings(home) }; + } + public void testDefaultsCompoundAnalysis() throws Exception { - Settings settings = getJsonSettings(); - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings); - AnalysisModule analysisModule = createAnalysisModule(settings); - TokenFilterFactory filterFactory = analysisModule.getAnalysisRegistry().buildTokenFilterFactories(idxSettings).get("dict_dec"); - MatcherAssert.assertThat(filterFactory, instanceOf(DictionaryCompoundWordTokenFilterFactory.class)); + for (Settings settings : this.settingsArr) { + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings); + AnalysisModule analysisModule = createAnalysisModule(settings); + TokenFilterFactory filterFactory = analysisModule.getAnalysisRegistry().buildTokenFilterFactories(idxSettings).get("dict_dec"); + MatcherAssert.assertThat(filterFactory, instanceOf(DictionaryCompoundWordTokenFilterFactory.class)); + } } public void testDictionaryDecompounder() throws Exception { - Settings[] settingsArr = new Settings[] { getJsonSettings(), getYamlSettings() }; - for (Settings settings : settingsArr) { + for (Settings settings : this.settingsArr) { List terms = analyze(settings, "decompoundingAnalyzer", "donaudampfschiff spargelcremesuppe"); MatcherAssert.assertThat(terms.size(), equalTo(8)); MatcherAssert.assertThat( @@ -83,6 +97,26 @@ public void testDictionaryDecompounder() throws Exception { } } + // Hyphenation Decompounder tests mimic the behavior of lucene tests + // lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestHyphenationCompoundWordTokenFilterFactory.java + public void testHyphenationDecompounder() throws Exception { + for (Settings settings : this.settingsArr) { + List terms = analyze(settings, "hyphenationAnalyzer", "min veninde som er lidt af en læsehest"); + MatcherAssert.assertThat(terms.size(), equalTo(10)); + MatcherAssert.assertThat(terms, 
hasItems("min", "veninde", "som", "er", "lidt", "af", "en", "læsehest", "læse", "hest")); + } + } + + // Hyphenation Decompounder tests mimic the behavior of lucene tests + // lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestHyphenationCompoundWordTokenFilterFactory.java + public void testHyphenationDecompounderNoSubMatches() throws Exception { + for (Settings settings : this.settingsArr) { + List terms = analyze(settings, "hyphenationAnalyzerNoSubMatches", "basketballkurv"); + MatcherAssert.assertThat(terms.size(), equalTo(3)); + MatcherAssert.assertThat(terms, hasItems("basketballkurv", "basketball", "kurv")); + } + } + private List analyze(Settings settings, String analyzerName, String text) throws IOException { IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings); AnalysisModule analysisModule = createAnalysisModule(settings); @@ -111,21 +145,28 @@ public Map> getTokenFilters() { })); } - private Settings getJsonSettings() throws IOException { + private void copyHyphenationPatternsFile(Path home) throws IOException { + InputStream hyphenation_patterns_path = getClass().getResourceAsStream("da_UTF8.xml"); + Path config = home.resolve("config"); + Files.createDirectory(config); + Files.copy(hyphenation_patterns_path, config.resolve("da_UTF8.xml")); + } + + private Settings getJsonSettings(Path home) throws IOException { String json = "/org/opensearch/analysis/common/test1.json"; return Settings.builder() .loadFromStream(json, getClass().getResourceAsStream(json), false) .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(Environment.PATH_HOME_SETTING.getKey(), home.toString()) .build(); } - private Settings getYamlSettings() throws IOException { + private Settings getYamlSettings(Path home) throws IOException { String yaml = "/org/opensearch/analysis/common/test1.yml"; return Settings.builder() .loadFromStream(yaml, getClass().getResourceAsStream(yaml), false) .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(Environment.PATH_HOME_SETTING.getKey(), home.toString()) .build(); } } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java index e86a939dc857b..b9f09033f49f3 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java @@ -50,14 +50,10 @@ public void testDefault() throws IOException { int default_bucket_size = 512; int default_hash_set_size = 1; Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); - OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( - settings, - new CommonAnalysisModulePlugin() - ); + OpenSearchTestCase.TestAnalysis analysis = getTestAnalysisFromSettings(settings); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("min_hash"); String source = "the quick brown fox"; - Tokenizer tokenizer = new WhitespaceTokenizer(); - tokenizer.setReader(new StringReader(source)); + Tokenizer tokenizer = getTokenizer(source); // with_rotation is true by default, and hash_set_size is 1, so even though the source doesn't 
// have enough tokens to fill all the buckets, we still expect 512 tokens. @@ -73,17 +69,63 @@ public void testSettings() throws IOException { .put("index.analysis.filter.test_min_hash.with_rotation", false) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( - settings, - new CommonAnalysisModulePlugin() - ); + OpenSearchTestCase.TestAnalysis analysis = getTestAnalysisFromSettings(settings); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("test_min_hash"); String source = "sushi"; - Tokenizer tokenizer = new WhitespaceTokenizer(); - tokenizer.setReader(new StringReader(source)); + Tokenizer tokenizer = getTokenizer(source); // despite the fact that bucket_count is 2 and hash_set_size is 1, // because with_rotation is false, we only expect 1 token here. assertStreamHasNumberOfTokens(tokenFilter.create(tokenizer), 1); } + + public void testBucketCountSetting() throws IOException { + // Correct case with "bucket_count" + Settings settingsWithBucketCount = Settings.builder() + .put("index.analysis.filter.test_min_hash.type", "min_hash") + .put("index.analysis.filter.test_min_hash.hash_count", "1") + .put("index.analysis.filter.test_min_hash.bucket_count", "3") + .put("index.analysis.filter.test_min_hash.hash_set_size", "1") + .put("index.analysis.filter.test_min_hash.with_rotation", false) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + + OpenSearchTestCase.TestAnalysis analysisWithBucketCount = getTestAnalysisFromSettings(settingsWithBucketCount); + + TokenFilterFactory tokenFilterWithBucketCount = analysisWithBucketCount.tokenFilter.get("test_min_hash"); + String sourceWithBucketCount = "salmon avocado roll uramaki"; + Tokenizer tokenizerWithBucketCount = getTokenizer(sourceWithBucketCount); + // Expect 3 tokens due to bucket_count being set to 3 + assertStreamHasNumberOfTokens(tokenFilterWithBucketCount.create(tokenizerWithBucketCount), 3); + } + + public void testHashSetSizeSetting() throws IOException { + // Correct case with "hash_set_size" + Settings settingsWithHashSetSize = Settings.builder() + .put("index.analysis.filter.test_min_hash.type", "min_hash") + .put("index.analysis.filter.test_min_hash.hash_count", "1") + .put("index.analysis.filter.test_min_hash.bucket_count", "1") + .put("index.analysis.filter.test_min_hash.hash_set_size", "2") + .put("index.analysis.filter.test_min_hash.with_rotation", false) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + + OpenSearchTestCase.TestAnalysis analysisWithHashSetSize = getTestAnalysisFromSettings(settingsWithHashSetSize); + + TokenFilterFactory tokenFilterWithHashSetSize = analysisWithHashSetSize.tokenFilter.get("test_min_hash"); + String sourceWithHashSetSize = "salmon avocado roll uramaki"; + Tokenizer tokenizerWithHashSetSize = getTokenizer(sourceWithHashSetSize); + // Expect 2 tokens due to hash_set_size being set to 2 and bucket_count being 1 + assertStreamHasNumberOfTokens(tokenFilterWithHashSetSize.create(tokenizerWithHashSetSize), 2); + } + + private static OpenSearchTestCase.TestAnalysis getTestAnalysisFromSettings(Settings settingsWithBucketCount) throws IOException { + return AnalysisTestsHelper.createTestAnalysisFromSettings(settingsWithBucketCount, new CommonAnalysisModulePlugin()); + } + + private static Tokenizer getTokenizer(String sourceWithBucketCount) { + Tokenizer tokenizerWithBucketCount = new 
WhitespaceTokenizer(); + tokenizerWithBucketCount.setReader(new StringReader(sourceWithBucketCount)); + return tokenizerWithBucketCount; + } } diff --git a/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/da_UTF8.xml b/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/da_UTF8.xml new file mode 100644 index 0000000000000..2c8d203be6881 --- /dev/null +++ b/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/da_UTF8.xml @@ -0,0 +1,1208 @@ + + + + + + + + + + +aA +bB +cC +dD +eE +fF +gG +hH +iI +jJ +kK +lL +mM +nN +oO +pP +qQ +rR +sS +tT +uU +vV +wW +xX +yY +zZ +æÆ +øØ +åÅ + + + +.ae3 +.an3k +.an1s +.be5la +.be1t +.bi4tr +.der3i +.diagno5 +.her3 +.hoved3 +.ne4t5 +.om1 +.ove4 +.po1 +.til3 +.yd5r +ab5le +3abst +a3c +ade5la +5adg +a1e +5afg +5a4f1l +af3r +af4ri +5afs +a4gef +a4gi +ag5in +ag5si +3agti +a4gy +a3h +ais5t +a3j +a5ka +a3ke +a5kr +aku5 +a3la +a1le +a1li +al3k +4alkv +a1lo +al5si +a3lu +a1ly +am4pa +3analy +an4k5r +a3nu +3anv +a5o +a5pe +a3pi +a5po +a1ra +ar5af +1arb +a1re +5arg +a1ri +a3ro +a3sa +a3sc +a1si +a3sk +a3so +3a3sp +a3ste +a3sti +a1ta1 +a1te +a1ti +a4t5in +a1to +ato5v +a5tr +a1tu +a5va +a1ve +a5z +1ba +ba4ti +4bd +1be +be1k +be3ro +be5ru +be1s4 +be1tr +1bi +bi5sk +b1j +4b1n +1bo +bo4gr +bo3ra +bo5re +1br4 +4bs +bs5k +b3so +b1st +b5t +3bu +bu4s5tr +b5w +1by +by5s +4c1c +1ce +ce5ro +3ch +4ch. +ci4o +ck3 +5cy +3da +4d3af +d5anta +da4s +d1b +d1d4 +1de +de5d +4de4lem +der5eri +de4rig +de5sk +d1f +d1g +d3h +1di +di1e +di5l +d3j +d1k +d1l +d1m +4d1n +3do +4dop +d5ov +d1p +4drett +5d4reve +3drif +3driv +d5ros +d5ru +ds5an +ds5in +d1ski +d4sm +d4su +dsu5l +ds5vi +d3ta +d1te +dt5o +d5tr +dt5u +1du +dub5 +d1v +3dy +e5ad +e3af +e5ag +e3ak +e1al +ea4la +e3an +e5ap +e3at +e3bl +ebs3 +e1ci +ed5ar +edde4 +eddel5 +e4do +ed5ra +ed3re +ed3rin +ed4str +e3e +3eff +e3fr +3eft +e3gu +e1h +e3in +ei5s +e3je +e4j5el +e1ka +e3ke +e3kl +4e1ko +e5kr +ek5sa +3eksem +3eksp +e3ku +e1kv +e5ky +e3lad +el3ak +el3ar +e1las +e3le +e4lek +3elem +e1li +5elim +e3lo +el5sa +e5lu +e3ly +e4mad +em4p5le +em1s +en5ak +e4nan +4enn +e4no +en3so +e5nu +e5ol +e3op +e1or +e3ov +epi3 +e1pr +e3ra +er3af +e4rag +e4rak +e1re +e4ref +er5ege +5erhv +e1ri +e4rib +er1k +ero5d +er5ov +er3s +er5tr +e3rum +er5un +e5ry +e1ta +e1te +etek4s +e1ti +e3tj +e1to +e3tr +e3tu +e1ty +e3um +e3un +3eur +e1va +e3ve +e4v3erf +e1vi +e5x +1fa +fa4ce +fags3 +f1b +f1d +1fe +fej4 +fejl1 +f1f +f1g +f1h +1fi +f1k +3fl +1fo +for1en +fo4ri +f1p +f1s4 +4ft +f3ta +f1te +f1ti +f5to +f5tvi +1fu +f1v +3fy +1ga +g3art +g1b +g1d +1ge +4g5enden +ger3in +ge3s +g3f +g1g +g1h +1gi +gi4b +gi3st +5gj +g3k +g1l +g1m +3go +4g5om +g5ov +g3p +1gr +gs1a +gsde4len +g4se +gsha4 +g5sla +gs3or +gs1p +g5s4tide +g4str +gs1v +g3ta +g1te +g1ti +g5to +g3tr +gt4s +g3ud +gun5 +g3v +1gy +g5yd +4ha. +heds3 +he5s +4het +hi4e +hi4n5 +hi3s +ho5ko +ho5ve +4h3t +hun4 +hund3 +hvo4 +i1a +i3b +i4ble +i1c +i3dr +ids5k +i1el +i1en +i3er +i3et. +if3r +i3gu +i3h +i5i +i5j +i1ka +i1ke +ik1l +i5ko +ik3re +ik5ri +iks5t +ik4tu +i3ku +ik3v +i3lag +il3eg +il5ej +il5el +i3li +i4l5id +il3k +i1lo +il5u +i3mu +ind3t +5inf +ings1 +in3s +in4sv +inter1 +i3nu +i3od +i3og +i5ok +i3ol +ion4 +ions1 +i5o5r +i3ot +i5pi +i3pli +i5pr +i3re +i3ri +ir5t +i3sc +i3si +i4sm +is3p +i1ster +i3sti +i5sua +i1ta +i1te +i1ti +i3to +i3tr +it5re. +i1tu +i3ty +i1u +i1va +i1ve +i1vi +j3ag +jde4rer +jds1 +jek4to +4j5en. 
+j5k +j3le +j3li +jlmeld5 +jlmel4di +j3r +jre5 +ju3s +5kap +k5au +5kav +k5b +kel5s +ke3sk +ke5st +ke4t5a +k3h +ki3e +ki3st +k1k +k5lak +k1le +3klu +k4ny +5kod +1kon +ko3ra +3kort +ko3v +1kra +5kry +ks3an +k1si +ks3k +ks1p +k3ste +k5stu +ks5v +k1t +k4tar +k4terh +kti4e +kt5re +kt5s +3kur +1kus +3kut +k4vo +k4vu +5lab +lad3r +5lagd +la4g3r +5lam +1lat +l1b +ldiagnos5 +l3dr +ld3st +1le. +5led +4lele +le4mo +3len +1ler +1les +4leu +l1f +lfin4 +lfind5 +l1go1 +l3h +li4ga +4l5ins +4l3int +li5o +l3j +l1ke +l1ko +l3ky +l1l +l5mu +lo4du +l3op +4l5or +3lov +4l3p +l4ps +l3r +4ls +lses1 +ls5in +l5sj +l1ta +l4taf +l1te +l4t5erf +l3ti +lt3o +l3tr +l3tu +lu5l +l3ve +l3vi +1ma +m1b +m3d +1me +4m5ej +m3f +m1g +m3h +1mi +mi3k +m5ing +mi4o +mi5sty +m3k +m1l +m1m +mmen5 +m1n +3mo +mo4da +4mop +4m5ov +m1pe +m3pi +m3pl +m1po +m3pr +m1r +mse5s +ms5in +m5sk +ms3p +m3ste +ms5v +m3ta +m3te +m3ti +m3tr +m1ud +1mul +mu1li +3my +3na +4nak +1nal +n1b +n1c +4nd +n3dr +nd5si +nd5sk +nd5sp +1ne +ne5a +ne4da +nemen4 +nement5e +neo4 +n3erk +n5erl +ne5sl +ne5st +n1f +n4go +4n1h +1ni +4nim +ni5o +ni3st +n1ke +n1ko +n3kr +n3ku +n5kv +4n1l +n1m +n1n +1no +n3ord +n5p +n3r +4ns +n3si +n1sku +ns3po +n1sta +n5sti +n1ta +nta4le +n1te +n1ti +ntiali4 +n3to +n1tr +nt4s5t +nt4su +n3tu +n3ty +4n1v +3ny +n3z +o3a +o4as +ob3li +o1c +o4din +od5ri +od5s +od5un +o1e +of5r +o4gek +o4gel +o4g5o +og5re +og5sk +o5h +o5in +oi6s5e +o1j +o3ka +o1ke +o3ku +o3la +o3le +o1li +o1lo +o3lu +o5ly +1omr +on3k +ook5 +o3or +o5ov +o3pi +op3l +op3r +op3s +3opta +4or. +or1an +3ordn +ord5s +o3re. +o3reg +o3rek +o3rer +o3re3s +o3ret +o3ri +3orient +or5im +o4r5in +or3k +or5o +or3sl +or3st +o3si +o3so +o3t +o1te +o5un +ov4s +3pa +pa5gh +p5anl +p3d +4pec +3pen +1per +pe1ra +pe5s +pe3u +p3f +4p5h +1pla +p4lan +4ple. +4pler +4ples +p3m +p3n +5pok +4po3re +3pot +4p5p4 +p4ro +1proc +p3sk +p5so +ps4p +p3st +p1t +1pu +pu5b +p5ule +p5v +5py3 +qu4 +4raf +ra5is +4rarb +r1b +r4d5ar +r3dr +rd4s3 +4reks +1rel +re5la +r5enss +5rese +re5spo +4ress +re3st +re5s4u +5rett +r1f +r1gu +r1h +ri1e +ri5la +4rimo +r4ing +ringse4 +ringso4r +4rinp +4rint +r3ka +r1ke +r1ki +rk3so +r3ku +r1l +rmo4 +r5mu +r1n +ro1b +ro3p +r3or +r3p +r1r +rre5s +rro4n5 +r1sa +r1si +r5skr +r4sk5v +rs4n +r3sp +r5stu +r5su +r3sv +r5tal +r1te +r4teli +r1ti +r3to +r4t5or +rt5rat +rt3re +r5tri +r5tro +rt3s +r5ty +r3ud +run4da +5rut +r3va +r1ve +r3vi +ry4s +s3af +1sam +sa4ma +s3ap +s1ar +1sat +4s1b +s1d +sdy4 +1se +s4ed +5s4er +se4se +s1f +4s1g4 +4s3h +si4bl +1sig +s5int +5sis +5sit +5siu +s5ju +4sk. +1skab +1ske +s3kl +sk5s4 +5sky +s1le +s1li +slo3 +5slu +s5ly +s1m +s4my +4snin +s4nit +so5k +5sol +5som. +3somm +s5oms +5somt +3son +4s1op +sp4 +3spec +4sper +3s4pi +s1pl +3sprog. +s5r4 +s1s4 +4st. +5s4tam +1stan +st5as +3stat +1stav +1ste. +1sted +3stel +5stemo +1sten +5step +3ster. +3stes +5stet +5stj +3sto +st5om +1str +s1ud +3sul +s3un +3sur +s3ve +3s4y +1sy1s +5ta. 
+1tag +tands3 +4tanv +4tb +tede4l +teds5 +3teg +5tekn +teo1 +5term +te5ro +4t1f +6t3g +t1h +tialis5t +3tid +ti4en +ti3st +4t3k +4t1l +tli4s5 +t1m +t1n +to5ra +to1re +to1ri +tor4m +4t3p +t4ra +4tres +tro5v +1try +4ts +t3si +ts4pa +ts5pr +t3st +ts5ul +4t1t +t5uds +5tur +t5ve +1typ +u1a +5udl +ud5r +ud3s +3udv +u1e +ue4t5 +uge4ri +ugs3 +u5gu +u3i +u5kl +uk4ta +uk4tr +u1la +u1le +u5ly +u5pe +up5l +u5q +u3ra +u3re +u4r3eg +u1rer +u3ro +us5a +u3si +u5ska +u5so +us5v +u1te +u1ti +u1to +ut5r +ut5s4 +5u5v +va5d +3varm +1ved +ve4l5e +ve4reg +ve3s +5vet +v5h +vi4l3in +1vis +v5j +v5k +vl4 +v3le +v5li +vls1 +1vo +4v5om +v5p +v5re +v3st +v5su +v5t +3vu +y3a +y5dr +y3e +y3ke +y5ki +yk3li +y3ko +yk4s5 +y3kv +y5li +y5lo +y5mu +yns5 +y5o +y1pe +y3pi +y3re +yr3ek +y3ri +y3si +y3ti +y5t3r +y5ve +zi5o + +.så3 +.ær5i +.øv3r +a3tø +a5væ +brød3 +5bæ +5drøv +dstå4 +3dæ +3dø +e3læ +e3lø +e3rø +er5øn +e5tæ +e5tø +e1væ +e3æ +e5å +3fæ +3fø +fø4r5en +giø4 +g4sø +g5så +3gæ +3gø1 +3gå +i5tæ +i3ø +3kø +3kå +lingeniø4 +l3væ +5løs +m5tå +1mæ +3mø +3må +n3kæ +n5tæ +3næ +4n5æb +5nø +o5læ +or3ø +o5å +5præ +5pæd +på3 +r5kæ +r5tæ +r5tø +r3væ +r5æl +4røn +5rør +3råd +r5år +s4kå +3slå +s4næ +5stø +1stå +1sæ +4s5æn +1sø +s5øk +så4r5 +ti4ø +3træk. +t4sø +t5så +t3væ +u3læ +3værd +1værk +5vå +y5væ +æb3l +æ3c +æ3e +æg5a +æ4gek +æ4g5r +ægs5 +æ5i +æ5kv +ælle4 +æn1dr +æ5o +æ1re +ær4g5r +æ3ri +ær4ma +ær4mo +ær5s +æ5si +æ3so +æ3ste +æ3ve +øde5 +ø3e +ø1je +ø3ke +ø3le +øms5 +øn3st +øn4t3 +ø1re +ø3ri +ørne3 +ør5o +ø1ve +å1d +å1e +å5h +å3l +å3re +års5t +å5sk +å3t + + diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml index 47cc80d6df310..ecd56ea7f277e 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml @@ -178,7 +178,10 @@ teardown: - skip: version: " - 2.15.99" reason: "fixed in 2.16.0" + features: allowed_warnings - do: + allowed_warnings: + - "index template [test_for_bulk_upsert_index_template] has index patterns [test_bulk_upsert_*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test_for_bulk_upsert_index_template] will take precedence during new index creation" indices.put_index_template: name: test_for_bulk_upsert_index_template body: diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java index 524d1719c37c0..742b923f05199 100644 --- a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java +++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java @@ -44,6 +44,7 @@ import org.opensearch.common.cache.CacheBuilder; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.ingest.Processor; import org.opensearch.plugins.IngestPlugin; @@ -62,10 +63,18 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.function.Function; +import java.util.stream.Collectors; import java.util.stream.Stream; public class IngestGeoIpModulePlugin extends Plugin implements IngestPlugin, Closeable { + static final 
Setting<List<String>> PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting(
+        "ingest.geoip.processors.allowed",
+        List.of(),
+        Function.identity(),
+        Setting.Property.NodeScope
+    );
     public static final Setting<Long> CACHE_SIZE = Setting.longSetting("ingest.geoip.cache_size", 1000, 0, Setting.Property.NodeScope);

     static String[] DEFAULT_DATABASE_FILENAMES = new String[] { "GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb" };
@@ -74,7 +83,7 @@ public class IngestGeoIpModulePlugin extends Plugin implements IngestPlugin, Clo
     @Override
     public List<Setting<?>> getSettings() {
-        return Arrays.asList(CACHE_SIZE);
+        return Arrays.asList(CACHE_SIZE, PROCESSORS_ALLOWLIST_SETTING);
     }

     @Override
@@ -90,7 +99,10 @@ public Map<String, Processor.Factory> getProcessors(Processor.Parameters paramet
         } catch (IOException e) {
             throw new RuntimeException(e);
         }
-        return Collections.singletonMap(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(cacheSize)));
+        return filterForAllowlistSetting(
+            parameters.env.settings(),
+            Map.of(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(cacheSize)))
+        );
     }

     /*
@@ -175,6 +187,30 @@ public void close() throws IOException {
         }
     }

+    private Map<String, Processor.Factory> filterForAllowlistSetting(Settings settings, Map<String, Processor.Factory> map) {
+        if (PROCESSORS_ALLOWLIST_SETTING.exists(settings) == false) {
+            return Map.copyOf(map);
+        }
+        final Set<String> allowlist = Set.copyOf(PROCESSORS_ALLOWLIST_SETTING.get(settings));
+        // Assert that no unknown processors are defined in the allowlist
+        final Set<String> unknownAllowlistProcessors = allowlist.stream()
+            .filter(p -> map.containsKey(p) == false)
+            .collect(Collectors.toUnmodifiableSet());
+        if (unknownAllowlistProcessors.isEmpty() == false) {
+            throw new IllegalArgumentException(
+                "Processor(s) "
+                    + unknownAllowlistProcessors
+                    + " were defined in ["
+                    + PROCESSORS_ALLOWLIST_SETTING.getKey()
+                    + "] but do not exist"
+            );
+        }
+        return map.entrySet()
+            .stream()
+            .filter(e -> allowlist.contains(e.getKey()))
+            .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));
+    }
+
     /**
      * The in-memory cache for the geoip data. There should only be 1 instance of this class.
* This cache differs from the maxmind's {@link NodeCache} such that this cache stores the deserialized Json objects to avoid the diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java index f6d42d1d65670..9446ec1228532 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java @@ -35,8 +35,20 @@ import com.maxmind.geoip2.model.AbstractResponse; import org.opensearch.common.network.InetAddresses; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.TestEnvironment; +import org.opensearch.ingest.Processor; import org.opensearch.ingest.geoip.IngestGeoIpModulePlugin.GeoIpCache; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.StreamsUtils; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Set; import static org.mockito.Mockito.mock; @@ -77,4 +89,87 @@ public void testInvalidInit() { IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new GeoIpCache(-1)); assertEquals("geoip max cache size must be 0 or greater", ex.getMessage()); } + + public void testAllowList() throws IOException { + runAllowListTest(List.of()); + runAllowListTest(List.of("geoip")); + } + + public void testInvalidAllowList() throws IOException { + List invalidAllowList = List.of("set"); + Settings.Builder settingsBuilder = Settings.builder() + .putList(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), invalidAllowList); + createDb(settingsBuilder); + try (IngestGeoIpModulePlugin plugin = new IngestGeoIpModulePlugin()) { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> plugin.getProcessors(createParameters(settingsBuilder.build())) + ); + assertEquals( + "Processor(s) " + + invalidAllowList + + " were defined in [" + + IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist", + e.getMessage() + ); + } + } + + public void testAllowListNotSpecified() throws IOException { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.remove(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey()); + createDb(settingsBuilder); + try (IngestGeoIpModulePlugin plugin = new IngestGeoIpModulePlugin()) { + final Set expected = Set.of("geoip"); + assertEquals(expected, plugin.getProcessors(createParameters(settingsBuilder.build())).keySet()); + } + } + + private void runAllowListTest(List allowList) throws IOException { + Settings.Builder settingsBuilder = Settings.builder(); + createDb(settingsBuilder); + final Settings settings = settingsBuilder.putList(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), allowList).build(); + try (IngestGeoIpModulePlugin plugin = new IngestGeoIpModulePlugin()) { + assertEquals(Set.copyOf(allowList), plugin.getProcessors(createParameters(settings)).keySet()); + } + } + + private void createDb(Settings.Builder settingsBuilder) throws IOException { + Path configDir = createTempDir(); + Path userAgentConfigDir = configDir.resolve("ingest-geoip"); + Files.createDirectories(userAgentConfigDir); + settingsBuilder.put("ingest.geoip.database_path", 
configDir).put("path.home", configDir); + try { + Files.copy( + new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-City.mmdb")), + configDir.resolve("GeoLite2-City.mmdb") + ); + Files.copy( + new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-Country.mmdb")), + configDir.resolve("GeoLite2-Country.mmdb") + ); + Files.copy( + new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-ASN.mmdb")), + configDir.resolve("GeoLite2-ASN.mmdb") + ); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private static Processor.Parameters createParameters(Settings settings) { + return new Processor.Parameters( + TestEnvironment.newEnvironment(settings), + null, + null, + null, + () -> 0L, + (a, b) -> null, + null, + null, + $ -> {}, + null + ); + } } diff --git a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java index cd96fcf2347ef..bac90d20b44e1 100644 --- a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java +++ b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java @@ -33,6 +33,7 @@ package org.opensearch.ingest.useragent; import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.ingest.Processor; import org.opensearch.plugins.IngestPlugin; import org.opensearch.plugins.Plugin; @@ -47,10 +48,19 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; import java.util.stream.Stream; public class IngestUserAgentModulePlugin extends Plugin implements IngestPlugin { + static final Setting> PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting( + "ingest.useragent.processors.allowed", + List.of(), + Function.identity(), + Setting.Property.NodeScope + ); private final Setting CACHE_SIZE_SETTING = Setting.longSetting( "ingest.user_agent.cache_size", 1000, @@ -77,7 +87,34 @@ public Map getProcessors(Processor.Parameters paramet } catch (IOException e) { throw new RuntimeException(e); } - return Collections.singletonMap(UserAgentProcessor.TYPE, new UserAgentProcessor.Factory(userAgentParsers)); + return filterForAllowlistSetting( + parameters.env.settings(), + Collections.singletonMap(UserAgentProcessor.TYPE, new UserAgentProcessor.Factory(userAgentParsers)) + ); + } + + private Map filterForAllowlistSetting(Settings settings, Map map) { + if (PROCESSORS_ALLOWLIST_SETTING.exists(settings) == false) { + return Map.copyOf(map); + } + final Set allowlist = Set.copyOf(PROCESSORS_ALLOWLIST_SETTING.get(settings)); + // Assert that no unknown processors are defined in the allowlist + final Set unknownAllowlistProcessors = allowlist.stream() + .filter(p -> map.containsKey(p) == false) + .collect(Collectors.toUnmodifiableSet()); + if (unknownAllowlistProcessors.isEmpty() == false) { + throw new IllegalArgumentException( + "Processor(s) " + + unknownAllowlistProcessors + + " were defined in [" + + PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist" + ); + } + return map.entrySet() + .stream() + .filter(e -> allowlist.contains(e.getKey())) + .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue)); } static Map createUserAgentParsers(Path userAgentConfigDirectory, 
UserAgentCache cache) throws IOException { diff --git a/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java new file mode 100644 index 0000000000000..31fdafff1188a --- /dev/null +++ b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.useragent; + +import org.opensearch.common.settings.Settings; +import org.opensearch.env.TestEnvironment; +import org.opensearch.ingest.Processor; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Set; + +public class IngestUserAgentModulePluginTests extends OpenSearchTestCase { + private Settings.Builder settingsBuilder; + + @Before + public void setup() throws IOException { + Path configDir = createTempDir(); + Path userAgentConfigDir = configDir.resolve("ingest-user-agent"); + Files.createDirectories(userAgentConfigDir); + settingsBuilder = Settings.builder().put("ingest-user-agent", configDir).put("path.home", configDir); + + // Copy file, leaving out the device parsers at the end + String regexWithoutDevicesFilename = "regexes_without_devices.yml"; + try ( + BufferedReader reader = new BufferedReader( + new InputStreamReader(UserAgentProcessor.class.getResourceAsStream("/regexes.yml"), StandardCharsets.UTF_8) + ); + BufferedWriter writer = Files.newBufferedWriter(userAgentConfigDir.resolve(regexWithoutDevicesFilename)); + ) { + String line; + while ((line = reader.readLine()) != null) { + if (line.startsWith("device_parsers:")) { + break; + } + + writer.write(line); + writer.newLine(); + } + } + } + + public void testAllowList() throws IOException { + runAllowListTest(List.of()); + runAllowListTest(List.of("user_agent")); + } + + public void testInvalidAllowList() throws IOException { + List invalidAllowList = List.of("set"); + final Settings settings = settingsBuilder.putList( + IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), + invalidAllowList + ).build(); + try (IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin()) { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> plugin.getProcessors(createParameters(settings)) + ); + assertEquals( + "Processor(s) " + + invalidAllowList + + " were defined in [" + + IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist", + e.getMessage() + ); + } + } + + public void testAllowListNotSpecified() throws IOException { + settingsBuilder.remove(IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey()); + try (IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin()) { + final Set expected = Set.of("user_agent"); + assertEquals(expected, plugin.getProcessors(createParameters(settingsBuilder.build())).keySet()); + } + } + + private void runAllowListTest(List allowList) throws IOException { + final Settings settings = 
settingsBuilder.putList(IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), allowList) + .build(); + try (IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin()) { + assertEquals(Set.copyOf(allowList), plugin.getProcessors(createParameters(settings)).keySet()); + } + } + + private static Processor.Parameters createParameters(Settings settings) { + return new Processor.Parameters( + TestEnvironment.newEnvironment(settings), + null, + null, + null, + () -> 0L, + (a, b) -> null, + null, + null, + $ -> {}, + null + ); + } +} diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index fb51a0bb7f157..7075901979e3b 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -33,6 +33,7 @@ import com.github.jengelman.gradle.plugins.shadow.ShadowBasePlugin apply plugin: 'opensearch.validate-rest-spec' apply plugin: 'opensearch.yaml-rest-test' +apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'An easy, safe and fast scripting language for OpenSearch' @@ -46,6 +47,7 @@ ext { testClusters.all { module ':modules:mapper-extras' + module ':modules:aggs-matrix-stats' systemProperty 'opensearch.scripting.update.ctx_in_params', 'false' // TODO: remove this once cname is prepended to transport.publish_address by default in 8.0 systemProperty 'opensearch.transport.cname_in_publish_address', 'true' diff --git a/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java b/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java new file mode 100644 index 0000000000000..c9078fdeeea28 --- /dev/null +++ b/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java @@ -0,0 +1,223 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
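Both ingest modules in this change gate their processors behind the same opt-in allowlist pattern: when the setting is absent, every processor the module provides is registered; when present, only the listed processors survive, and an entry naming a processor the module does not provide is rejected with an IllegalArgumentException when the processor factories are built. Note the key spellings differ slightly: the new allowlists are ingest.geoip.processors.allowed and ingest.useragent.processors.allowed, while the pre-existing cache setting keeps ingest.user_agent.cache_size. An illustrative node-settings snippet (the builder wiring here is for demonstration only):

```java
// Hypothetical node configuration restricting each module to its sole processor.
Settings nodeSettings = Settings.builder()
    .putList("ingest.geoip.processors.allowed", "geoip")
    .putList("ingest.useragent.processors.allowed", "user_agent")
    .build();
// A misspelled entry such as "geo_ip" would fail fast with:
//   Processor(s) [geo_ip] were defined in [ingest.geoip.processors.allowed] but do not exist
```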
+ */ + +package org.opensearch.painless; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.plugins.Plugin; +import org.opensearch.script.Script; +import org.opensearch.script.ScriptType; +import org.opensearch.search.aggregations.AggregationBuilder; +import org.opensearch.search.aggregations.AggregationBuilders; +import org.opensearch.search.aggregations.bucket.composite.InternalComposite; +import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder; +import org.opensearch.search.aggregations.bucket.terms.Terms; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class SimplePainlessIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SimplePainlessIT(Settings nodeSettings) { + super(nodeSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() } + ); + } + + @Override + protected Collection> nodePlugins() { + return List.of(PainlessModulePlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), "4") + .build(); + } + + @Override + public void setupSuiteScopeCluster() throws Exception { + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() + .startObject() + .field("dynamic", "false") + .startObject("_meta") + .field("schema_version", 5) + .endObject() + .startObject("properties") + .startObject("entity") + .field("type", "nested") + .endObject() + .endObject() + .endObject(); + + assertAcked( + prepareCreate("test").setMapping(xContentBuilder) + .setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + ); + + assertAcked( + prepareCreate("test-df").setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 
1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+            )
+        );
+
+        client().prepareIndex("test")
+            .setId("a")
+            .setSource(
+                "{\"entity\":[{\"name\":\"ip-field\",\"value\":\"1.2.3.4\"},{\"name\":\"keyword-field\",\"value\":\"field-1\"}]}",
+                MediaTypeRegistry.JSON
+            )
+            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            .get();
+        client().prepareIndex("test")
+            .setId("b")
+            .setSource(
+                "{\"entity\":[{\"name\":\"ip-field\",\"value\":\"5.6.7.8\"},{\"name\":\"keyword-field\",\"value\":\"field-2\"}]}",
+                MediaTypeRegistry.JSON
+            )
+            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            .get();
+        client().prepareIndex("test")
+            .setId("c")
+            .setSource(
+                "{\"entity\":[{\"name\":\"ip-field\",\"value\":\"1.6.3.8\"},{\"name\":\"keyword-field\",\"value\":\"field-2\"}]}",
+                MediaTypeRegistry.JSON
+            )
+            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            .get();
+        client().prepareIndex("test")
+            .setId("d")
+            .setSource(
+                "{\"entity\":[{\"name\":\"ip-field\",\"value\":\"2.6.4.8\"},{\"name\":\"keyword-field\",\"value\":\"field-2\"}]}",
+                MediaTypeRegistry.JSON
+            )
+            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            .get();
+        ensureSearchable("test");
+
+        client().prepareIndex("test-df")
+            .setId("a")
+            .setSource("{\"field\":\"value1\"}", MediaTypeRegistry.JSON)
+            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            .get();
+        client().prepareIndex("test-df")
+            .setId("b")
+            .setSource("{\"field\":\"value2\"}", MediaTypeRegistry.JSON)
+            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            .get();
+        client().prepareIndex("test-df")
+            .setId("c")
+            .setSource("{\"field\":\"value3\"}", MediaTypeRegistry.JSON)
+            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            .get();
+        client().prepareIndex("test-df")
+            .setId("d")
+            .setSource("{\"field\":\"value1\"}", MediaTypeRegistry.JSON)
+            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            .get();
+        ensureSearchable("test-df");
+    }
+
+    public void testTermsValuesSource() throws Exception {
+        AggregationBuilder agg = AggregationBuilders.composite(
+            "multi_buckets",
+            Collections.singletonList(
+                new TermsValuesSourceBuilder("keyword-field").script(
+                    new Script(
+                        ScriptType.INLINE,
+                        "painless",
+                        "String value = null; if (params == null || params._source == null || params._source.entity == null) { return \"\"; } for (item in params._source.entity) { if (item[\"name\"] == \"keyword-field\") { value = item['value']; break; } } return value;",
+                        Collections.emptyMap()
+                    )
+                )
+            )
+        );
+        SearchResponse response = client().prepareSearch("test").setQuery(matchAllQuery()).addAggregation(agg).get();
+
+        assertSearchResponse(response);
+        assertEquals(2, ((InternalComposite) response.getAggregations().get("multi_buckets")).getBuckets().size());
+        assertEquals(
+            "field-1",
+            ((InternalComposite) response.getAggregations().get("multi_buckets")).getBuckets().get(0).getKey().get("keyword-field")
+        );
+        assertEquals(1, ((InternalComposite) response.getAggregations().get("multi_buckets")).getBuckets().get(0).getDocCount());
+        assertEquals(
+            "field-2",
+            ((InternalComposite) response.getAggregations().get("multi_buckets")).getBuckets().get(1).getKey().get("keyword-field")
+        );
+        assertEquals(3, ((InternalComposite) response.getAggregations().get("multi_buckets")).getBuckets().get(1).getDocCount());
+    }
+
+    public void testSimpleDerivedFieldsQuery() {
+        SearchRequest searchRequest = new SearchRequest("test-df").source(
+            SearchSourceBuilder.searchSource()
+                .derivedField("result", "keyword", new
Script("emit(params._source[\"field\"])")) + .fetchField("result") + .query(new TermsQueryBuilder("result", "value1")) + ); + SearchResponse response = client().search(searchRequest).actionGet(); + assertSearchResponse(response); + assertEquals(2, Objects.requireNonNull(response.getHits().getTotalHits()).value); + } + + public void testSimpleDerivedFieldsAgg() { + SearchRequest searchRequest = new SearchRequest("test-df").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "keyword", new Script("emit(params._source[\"field\"])")) + .fetchField("result") + .aggregation(new TermsAggregationBuilder("derived-agg").field("result")) + ); + SearchResponse response = client().search(searchRequest).actionGet(); + assertSearchResponse(response); + Terms aggResponse = response.getAggregations().get("derived-agg"); + assertEquals(3, aggResponse.getBuckets().size()); + Terms.Bucket bucket = aggResponse.getBuckets().get(0); + assertEquals("value1", bucket.getKey()); + assertEquals(2, bucket.getDocCount()); + bucket = aggResponse.getBuckets().get(1); + assertEquals("value2", bucket.getKey()); + assertEquals(1, bucket.getDocCount()); + bucket = aggResponse.getBuckets().get(2); + assertEquals("value3", bucket.getKey()); + assertEquals(1, bucket.getDocCount()); + } +} diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml new file mode 100644 index 0000000000000..87c260ce5f308 --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml @@ -0,0 +1,1515 @@ +--- +setup: +- skip: + version: " - 2.14.99" + reason: "derived_field feature was added in 2.15" + +# -- NOT SUPPORTED: -- +# geobounds +# scripted metric +# -- NOT SUPPORTED: -- +# Any geo agg +# sig terms/text + +- do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: text + keyword: + type: keyword + os: + type: keyword + long: + type: long + float: + type: float + double: + type: double + date: + type: date + geo: + type: geo_point + ip: + type: ip + boolean: + type: boolean + array_of_long: + type: long + json_field: + type: text + derived: + derived_text: + type: text + script: "emit(params._source[\"text\"])" + derived_text_prefilter_field: + type: text + script: "emit(params._source[\"text\"])" + prefilter_field: "text" + derived_keyword: + type: keyword + script: "emit(params._source[\"keyword\"])" + derived_os: + type: keyword + script: "emit(params._source[\"os\"])" + derived_long: + type: long + script: "emit(params._source[\"long\"])" + derived_float: + type: float + script: "emit(params._source[\"float\"])" + derived_double: + type: double + script: "emit(params._source[\"double\"])" + derived_date: + type: date + script: "emit(ZonedDateTime.parse(params._source[\"date\"]).toInstant().toEpochMilli())" + derived_geo: + type: geo_point + script: "emit(params._source[\"geo\"][0], params._source[\"geo\"][1])" + derived_ip: + type: ip + script: "emit(params._source[\"ip\"])" + derived_boolean: + type: boolean + script: "emit(params._source[\"boolean\"])" + derived_array_of_long: + type: long + script: "emit(params._source[\"array_of_long\"][0]);emit(params._source[\"array_of_long\"][1]);" + derived_object: + type: object + properties: + keyword: keyword + ip: ip + os: keyword + script: "emit(params._source[\"json_field\"])" + 
prefilter_field: "json_field" + +- do: + bulk: + refresh: true + body: + - index: + _index: test + _id: 1 + - text: "peter piper" + keyword: "foo" + os: "mac" + long: 1 + float: 1.0 + double: 1.0 + date: "2017-01-01T00:00:00Z" + geo: [ -74.0060, 40.7128 ] + ip: "192.168.0.1" + boolean: true + array_of_long: [ 1, 2 ] + json_field: "{\"text\":\"peter piper\",\"keyword\":\"foo\",\"os\":\"mac\",\"long\":1,\"float\":1.0,\"double\":1.0,\"date\":\"2017-01-01T00:00:00Z\",\"ip\":\"192.168.0.1\",\"boolean\":true, \"array_of_long\": [1, 2]}" + - index: + _index: test + _id: 2 + - text: "piper picked a peck" + keyword: "bar" + os: "windows" + long: 2 + float: 2.0 + double: 2.0 + date: "2017-01-02T00:00:00Z" + geo: [ -118.2437, 34.0522 ] + ip: "10.0.0.1" + boolean: false + array_of_long: [ 2, 3 ] + json_field: "{\"keyword\":\"bar\",\"long\":2,\"float\":2.0,\"os\":\"windows\",\"double\":2.0,\"date\":\"2017-01-02T00:00:00Z\",\"ip\":\"10.0.0.1\",\"boolean\":false, \"array_of_long\": [2, 3]}" + - index: + _index: test + _id: 3 + - text: "peck of pickled peppers" + keyword: "baz" + os: "mac" + long: -3 + float: -3.0 + double: -3.0 + date: "2017-01-03T00:00:00Z" + geo: [ -87.6298, 41.87 ] + ip: "172.16.0.1" + boolean: true + array_of_long: [ 3, 4 ] + json_field: "{\"keyword\":\"baz\",\"long\":-3,\"float\":-3.0,\"os\":\"mac\",\"double\":-3.0,\"date\":\"2017-01-03T00:00:00Z\",\"ip\":\"172.16.0.1\",\"boolean\":true, \"array_of_long\": [3, 4]}" + - index: + _index: test + _id: 4 + - text: "pickled peppers" + keyword: "qux" + os: "windows" + long: 4 + float: 4.0 + double: 4.0 + date: "2017-01-04T00:00:00Z" + geo: [ -74.0060, 40.7128 ] + ip: "192.168.0.2" + boolean: false + array_of_long: [ 4, 5 ] + json_field: "{\"keyword\":\"qux\",\"long\":4,\"float\":4.0,\"os\":\"windows\",\"double\":4.0,\"date\":\"2017-01-04T00:00:00Z\",\"ip\":\"192.168.0.2\",\"boolean\":false, \"array_of_long\": [4, 5]}" + - index: + _index: test + _id: 5 + - text: "peppers" + keyword: "quux" + os: "mac" + long: 5 + float: 5.0 + double: 5.0 + date: "2017-01-05T00:00:00Z" + geo: [ -87.6298, 41.87 ] + ip: "10.0.0.2" + boolean: true + array_of_long: [ 5, 6 ] + json_field: "{\"keyword\":\"quux\",\"long\":5,\"float\":5.0,\"os\":\"mac\",\"double\":5.0,\"date\":\"2017-01-05T00:00:00Z\",\"ip\":\"10.0.0.2\",\"boolean\":true, \"array_of_long\": [5, 6]}" + +- do: + indices.refresh: + index: [test] + +### BUCKET AGGS +--- +"Test terms aggregation on derived_keyword from search definition": +- do: + search: + index: test + body: + derived: + derived_keyword_search_definition: + type: keyword + script: "emit(params._source[\"keyword\"])" + size: 0 + aggs: + keywords: + terms: + field: derived_keyword_search_definition + +- match: { hits.total.value: 5 } +- length: { aggregations.keywords.buckets: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.doc_count: 1 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.doc_count: 1 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.doc_count: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.doc_count: 1 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.doc_count: 1 } + +--- +"Test terms aggregation on derived_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_keyword + +- match: { hits.total.value: 5 
} +- length: { aggregations.keywords.buckets: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.doc_count: 1 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.doc_count: 1 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.doc_count: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.doc_count: 1 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.doc_count: 1 } + +--- +"Test range aggregation on derived_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_ranges: + range: + field: derived_long + ranges: + - to: 0 + - from: 0 + to: 3 + - from: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.long_ranges.buckets: 3 } +- match: { aggregations.long_ranges.buckets.0.doc_count: 1 } +- match: { aggregations.long_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.long_ranges.buckets.2.doc_count: 2 } + +--- +"Test histogram aggregation on derived_float": +- do: + search: + index: test + body: + size: 0 + aggs: + float_histogram: + histogram: + field: derived_float + interval: 2 + +- match: { hits.total.value: 5 } +- length: { aggregations.float_histogram.buckets: 5 } +- match: { aggregations.float_histogram.buckets.0.key: -4.0 } +- match: { aggregations.float_histogram.buckets.0.doc_count: 1 } + +--- +"Test date_histogram aggregation on derived_date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: derived_date + calendar_interval: day + +- match: { hits.total.value: 5 } +- length: { aggregations.date_histogram.buckets: 5 } +- match: { aggregations.date_histogram.buckets.0.key_as_string: "2017-01-01T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.0.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.1.key_as_string: "2017-01-02T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.1.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.2.key_as_string: "2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.2.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.3.key_as_string: "2017-01-04T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.3.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.4.key_as_string: "2017-01-05T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.4.doc_count: 1 } + +--- +"Test date_range aggregation on derived_date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_range: + date_range: + field: derived_date + ranges: + - to: "2017-01-03T00:00:00Z" + - from: "2017-01-03T00:00:00Z" + +- match: { hits.total.value: 5 } +- match: { aggregations.date_range.buckets.0.key: "*-2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_range.buckets.0.doc_count: 2 } +- match: { aggregations.date_range.buckets.1.key: "2017-01-03T00:00:00.000Z-*" } +- match: { aggregations.date_range.buckets.1.doc_count: 3 } + +--- +"Test filters aggregation on derived_boolean": +- do: + search: + index: test + body: + size: 0 + aggs: + boolean_filters: + filters: + filters: + true_values: + term: + derived_boolean: true + false_values: + term: + derived_boolean: false + +- match: { hits.total.value: 5 } +- match: { aggregations.boolean_filters.buckets.true_values.doc_count: 3 } +- match: { 
aggregations.boolean_filters.buckets.false_values.doc_count: 2 } + +--- +"Test adjacency matrix aggregation on derived_long": +- do: + search: + index: test + body: + size: 0 + aggs: + adj_matrix: + adjacency_matrix: + filters: + high_num: + range: + derived_long: + gte: 3 + low_num: + range: + derived_long: + lt: 3 +- match: { hits.total.value: 5 } +- length: { aggregations.adj_matrix.buckets: 2 } +- match: { aggregations.adj_matrix.buckets.0.key: "high_num" } +- match: { aggregations.adj_matrix.buckets.0.doc_count: 2 } +- match: { aggregations.adj_matrix.buckets.1.key: "low_num" } +- match: { aggregations.adj_matrix.buckets.1.doc_count: 3 } + +### METRIC AGGS + +--- +"Test stats aggregation on derived_array_of_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_array_stats: + stats: + field: derived_array_of_long + +- match: { hits.total.value: 5 } +- match: { aggregations.long_array_stats.count: 10 } +- match: { aggregations.long_array_stats.min: 1 } +- match: { aggregations.long_array_stats.max: 6 } +- match: { aggregations.long_array_stats.avg: 3.5 } +- match: { aggregations.long_array_stats.sum: 35 } + +--- +"Test cardinality aggregation on derived_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_keywords: + cardinality: + field: derived_keyword + +- match: { hits.total.value: 5 } +- match: { aggregations.unique_keywords.value: 5 } + +--- +"Test percentiles aggregation on derived_double": +- do: + search: + index: test + body: + size: 0 + aggs: + double_percentiles: + percentiles: + field: derived_double + percents: [ 25, 50, 75 ] + +- match: { hits.total.value: 5 } +- length: { aggregations.double_percentiles.values: 3} + +--- +"Test percentile ranks aggregation on derived_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_percentile_ranks: + percentile_ranks: + field: derived_long + values: [ 2, 4 ] + +- match: { hits.total.value: 5 } +- length: { aggregations.long_percentile_ranks.values: 2} + +--- +"Test top hits aggregation on derived_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + top_keywords: + terms: + field: derived_keyword + aggs: + top_hits: + top_hits: + size: 1 +- match: { hits.total.value: 5 } +- length: { aggregations.top_keywords.buckets: 5 } +- match: { aggregations.top_keywords.buckets.0.key: "bar" } +- match: { aggregations.top_keywords.buckets.0.doc_count: 1 } +- length: { aggregations.top_keywords.buckets.0.top_hits.hits.hits: 1 } + +--- +"Test matrix stats aggregation on derived_long and float": +- do: + search: + index: test + body: + size: 0 + aggs: + matrix_stats: + matrix_stats: + fields: [ derived_long, derived_float ] +- match: { hits.total.value: 5 } +- length: { aggregations.matrix_stats.fields: 2 } +- match: { aggregations.matrix_stats.fields.0.name: "derived_float" } +- match: { aggregations.matrix_stats.fields.0.count: 5 } +- match: { aggregations.matrix_stats.fields.1.name: "derived_long" } +- match: { aggregations.matrix_stats.fields.1.count: 5 } + +--- +"Test median absolute deviation aggregation on derived_long": +- do: + search: + index: test + body: + size: 0 + aggs: + mad_long: + median_absolute_deviation: + field: derived_long +- match: { hits.total.value: 5 } +- match: { aggregations.mad_long.value: 2.0 } + +## Pipeline agg +--- +"Test simple pipeline agg with derived_keyword and long": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_keyword + aggs: + sum_derived_longs: + sum: + field: derived_long + 
sum_total: + sum_bucket: + buckets_path: "keywords>sum_derived_longs" +- match: { hits.total.value: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.sum_derived_longs.value: 2 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.sum_derived_longs.value: -3 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.sum_derived_longs.value: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.sum_derived_longs.value: 5 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.sum_derived_longs.value: 4 } +- match: { aggregations.sum_total.value: 9 } + + +--- +"Test terms aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_terms: + terms: + field: derived_ip + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_terms.buckets: 5 } +- match: { aggregations.ip_terms.buckets.0.key: "10.0.0.1" } +- match: { aggregations.ip_terms.buckets.0.doc_count: 1 } + +--- +"Test range aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_ranges: + ip_range: + field: derived_ip + ranges: + - to: "10.0.0.0" + - from: "10.0.0.0" + to: "172.16.0.0" + - from: "172.16.0.0" + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_ranges.buckets: 3 } +- match: { aggregations.ip_ranges.buckets.0.doc_count: 0 } +- match: { aggregations.ip_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.ip_ranges.buckets.2.doc_count: 3 } + +--- +"Test cardinality aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_ips: + cardinality: + field: derived_ip + +- match: { hits.total.value: 5 } +- match: { aggregations.unique_ips.value: 5 } + +--- +"Test missing aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + missing_ips: + missing: + field: derived_ip + +- match: { hits.total.value: 5 } +- match: { aggregations.missing_ips.doc_count: 0 } + +--- +"Test value count aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_count: + value_count: + field: derived_ip + +- match: { hits.total.value: 5 } +- match: { aggregations.ip_count.value: 5 } + +--- +"Test composite agg": +- do: + search: + index: test + body: + size: 0 + aggs: + test_composite_agg: + composite: + size: 10 + sources: + - os: + terms: + field: derived_os + - keyword: + terms: + field: derived_keyword + - is_true: + terms: + field: derived_boolean + aggs: + avg_long: + avg: + field: derived_long +- match: { aggregations.test_composite_agg.buckets.0.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.0.key.keyword: "baz" } +- match: { aggregations.test_composite_agg.buckets.0.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.0.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.0.avg_long.value: -3.0 } +- match: { aggregations.test_composite_agg.buckets.1.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.1.key.keyword: "foo" } +- match: { aggregations.test_composite_agg.buckets.1.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.1.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.1.avg_long.value: 1.0 } +- match: { aggregations.test_composite_agg.buckets.2.key.os: "mac" } +- match: { 
aggregations.test_composite_agg.buckets.2.key.keyword: "quux" } +- match: { aggregations.test_composite_agg.buckets.2.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.2.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.2.avg_long.value: 5.0 } +- match: { aggregations.test_composite_agg.buckets.3.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.3.key.keyword: "bar" } +- match: { aggregations.test_composite_agg.buckets.3.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.3.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.3.avg_long.value: 2.0 } +- match: { aggregations.test_composite_agg.buckets.4.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.4.key.keyword: "qux" } +- match: { aggregations.test_composite_agg.buckets.4.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.4.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.4.avg_long.value: 4.0 } + +--- +"Test auto date histogram": +- do: + search: + rest_total_hits_as_int: true + index: test + body: + size: 0 + aggs: + test_auto_date_histogram: + auto_date_histogram: + field: "derived_date" + buckets: 10 + format: "yyyy-MM-dd" + aggs: + avg_long: + avg: + field: derived_long +- match: { hits.total: 5 } +- length: { aggregations.test_auto_date_histogram.buckets: 9 } +- match: { aggregations.test_auto_date_histogram.buckets.0.key_as_string: "2017-01-01"} +- match: { aggregations.test_auto_date_histogram.buckets.0.avg_long.value: 1.0} + +--- +"Test variable_width_histogram aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + var_width_hist: + variable_width_histogram: + field: derived_long + buckets: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.var_width_hist.buckets: 3 } + +--- +"Test extended_stats aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + extended_stats_agg: + extended_stats: + field: derived_long + +- match: { hits.total.value: 5 } +- match: { aggregations.extended_stats_agg.count: 5 } +- match: { aggregations.extended_stats_agg.min: -3 } +- match: { aggregations.extended_stats_agg.max: 5 } +- is_true: aggregations.extended_stats_agg.avg +- is_true: aggregations.extended_stats_agg.sum +- is_true: aggregations.extended_stats_agg.sum_of_squares +- is_true: aggregations.extended_stats_agg.variance +- is_true: aggregations.extended_stats_agg.std_deviation + +--- +"Test rare_terms aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + rare_terms_agg: + rare_terms: + field: derived_keyword + max_doc_count: 1 + +- match: { hits.total.value: 5 } +- length: { aggregations.rare_terms_agg.buckets: 5 } + +--- +"Test global aggregation": +- do: + search: + index: test + body: + query: + term: + derived_keyword: "foo" + aggs: + all_docs: + global: {} + aggs: + avg_long: + avg: + field: derived_long + +- match: { hits.total.value: 1 } +- match: { aggregations.all_docs.doc_count: 5 } +- match: { aggregations.all_docs.avg_long.value: 1.8 } + +--- +"Test missing aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + missing_agg: + missing: + field: derived_keyword + +- match: { hits.total.value: 5 } +- match: { aggregations.missing_agg.doc_count: 0 } + +--- +"Test value_count aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + value_count_agg: + value_count: + field: derived_long + +- match: { hits.total.value: 5 } +- match: { aggregations.value_count_agg.value: 5 } + 
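Note on the fields exercised throughout these tests: except for the first test, which supplies an inline search-time definition, the derived_* fields are aggregated on without any definition in the request body, so they are presumably declared once in the index mapping during the setup portion elided above. A minimal sketch of what such a mapping-level declaration could look like, reusing the type/script pattern from the search-definition test; the field names and scripts below are illustrative assumptions, not lines from this change:

    # Hypothetical mapping-level declaration of two of the derived fields
    # (the actual setup lives in the elided part of this file and may differ):
    mappings:
      derived:
        derived_keyword:
          type: keyword
          script: "emit(params._source[\"keyword\"])"
        derived_long:
          type: long
          script: "emit(params._source[\"long\"])"

A field declared this way should behave in aggregations just like the request-scoped derived_keyword_search_definition above, which is why the first two terms-aggregation tests assert identical buckets.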
+--- +"Test weighted_avg aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + weighted_avg_agg: + weighted_avg: + value: + field: derived_long + weight: + field: derived_float + +- match: { hits.total.value: 5 } +- is_true: aggregations.weighted_avg_agg.value + +--- +"Test diversified_sampler aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + diversified_sampler_agg: + diversified_sampler: + field: derived_keyword + max_docs_per_value: 1 + aggs: + avg_long: + avg: + field: derived_long + +- match: { hits.total.value: 5 } +- match: { aggregations.diversified_sampler_agg.doc_count: 5 } +- match: { aggregations.diversified_sampler_agg.avg_long.value: 1.8 } + +--- +"Test sampler aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + sampler_agg: + sampler: + shard_size: 2 + aggs: + avg_long: + avg: + field: derived_long + +- match: { hits.total.value: 5 } +- is_true: aggregations.sampler_agg.doc_count +- is_true: aggregations.sampler_agg.avg_long.value + +--- +"Test multi_terms aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + multi_terms_agg: + multi_terms: + terms: + - field: derived_keyword + - field: derived_os + size: 10 + +- match: { hits.total.value: 5 } +- length: { aggregations.multi_terms_agg.buckets: 5 } + +#### SAME TESTS WITH DERIVED_OBJECT +--- +"Test terms aggregation on derived_object.keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_object.keyword + +- match: { hits.total.value: 5 } +- length: { aggregations.keywords.buckets: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.doc_count: 1 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.doc_count: 1 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.doc_count: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.doc_count: 1 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.doc_count: 1 } + +--- +"Test range aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_ranges: + range: + field: derived_object.long + ranges: + - to: 0 + - from: 0 + to: 3 + - from: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.long_ranges.buckets: 3 } +- match: { aggregations.long_ranges.buckets.0.doc_count: 1 } +- match: { aggregations.long_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.long_ranges.buckets.2.doc_count: 2 } + +--- +"Test histogram aggregation on derived_object.float": +- do: + search: + index: test + body: + size: 0 + aggs: + float_histogram: + histogram: + field: derived_object.float + interval: 2 + +- match: { hits.total.value: 5 } +- length: { aggregations.float_histogram.buckets: 5 } +- match: { aggregations.float_histogram.buckets.0.key: -4.0 } +- match: { aggregations.float_histogram.buckets.0.doc_count: 1 } + +--- +"Test date_histogram aggregation on derived_object.date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: derived_object.date + calendar_interval: day + +- match: { hits.total.value: 5 } +- length: { aggregations.date_histogram.buckets: 5 } +- match: { aggregations.date_histogram.buckets.0.key_as_string: "2017-01-01T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.0.doc_count: 1 } 
+- match: { aggregations.date_histogram.buckets.1.key_as_string: "2017-01-02T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.1.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.2.key_as_string: "2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.2.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.3.key_as_string: "2017-01-04T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.3.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.4.key_as_string: "2017-01-05T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.4.doc_count: 1 } + +--- +"Test date_range aggregation on derived_object.date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_range: + date_range: + field: derived_object.date + ranges: + - to: "2017-01-03T00:00:00Z" + - from: "2017-01-03T00:00:00Z" + +- match: { hits.total.value: 5 } +- match: { aggregations.date_range.buckets.0.key: "*-2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_range.buckets.0.doc_count: 2 } +- match: { aggregations.date_range.buckets.1.key: "2017-01-03T00:00:00.000Z-*" } +- match: { aggregations.date_range.buckets.1.doc_count: 3 } + +--- +"Test filters aggregation on derived_object.boolean": +- do: + search: + index: test + body: + size: 0 + aggs: + boolean_filters: + filters: + filters: + true_values: + term: + derived_object.boolean: true + false_values: + term: + derived_object.boolean: false + +- match: { hits.total.value: 5 } +- match: { aggregations.boolean_filters.buckets.true_values.doc_count: 3 } +- match: { aggregations.boolean_filters.buckets.false_values.doc_count: 2 } + +--- +"Test adjacency matrix aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + adj_matrix: + adjacency_matrix: + filters: + high_num: + range: + derived_object.long: + gte: 3 + low_num: + range: + derived_object.long: + lt: 3 +- match: { hits.total.value: 5 } +- length: { aggregations.adj_matrix.buckets: 2 } +- match: { aggregations.adj_matrix.buckets.0.key: "high_num" } +- match: { aggregations.adj_matrix.buckets.0.doc_count: 2 } +- match: { aggregations.adj_matrix.buckets.1.key: "low_num" } +- match: { aggregations.adj_matrix.buckets.1.doc_count: 3 } + +--- +"Test stats aggregation on derived_object.array_of_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_array_stats: + stats: + field: derived_object.array_of_long + +- match: { hits.total.value: 5 } +- match: { aggregations.long_array_stats.count: 10 } +- match: { aggregations.long_array_stats.min: 1 } +- match: { aggregations.long_array_stats.max: 6 } +- match: { aggregations.long_array_stats.avg: 3.5 } +- match: { aggregations.long_array_stats.sum: 35 } + +--- +"Test cardinality aggregation on derived_object_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_keywords: + cardinality: + field: derived_object.keyword + +- match: { hits.total.value: 5 } +- match: { aggregations.unique_keywords.value: 5 } + +--- +"Test percentiles aggregation on derived_object.double": +- do: + search: + index: test + body: + size: 0 + aggs: + double_percentiles: + percentiles: + field: derived_object.double + percents: [ 25, 50, 75 ] + +- match: { hits.total.value: 5 } +- length: { aggregations.double_percentiles.values: 3} + +--- +"Test percentile ranks aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_percentile_ranks: + percentile_ranks: + field: derived_object.long + 
values: [ 2, 4 ] + +- match: { hits.total.value: 5 } +- length: { aggregations.long_percentile_ranks.values: 2} + +--- +"Test top hits aggregation on derived_object.keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + top_keywords: + terms: + field: derived_object.keyword + aggs: + top_hits: + top_hits: + size: 1 +- match: { hits.total.value: 5 } +- length: { aggregations.top_keywords.buckets: 5 } +- match: { aggregations.top_keywords.buckets.0.key: "bar" } +- match: { aggregations.top_keywords.buckets.0.doc_count: 1 } +- length: { aggregations.top_keywords.buckets.0.top_hits.hits.hits: 1 } + +--- +"Test matrix stats aggregation on derived_object.long and float": +- do: + search: + index: test + body: + size: 0 + aggs: + matrix_stats: + matrix_stats: + fields: [ derived_object.long, derived_object.float ] +- match: { hits.total.value: 5 } +- length: { aggregations.matrix_stats.fields: 2 } +- match: { aggregations.matrix_stats.fields.0.name: "derived_object.long" } +- match: { aggregations.matrix_stats.fields.0.count: 5 } +- match: { aggregations.matrix_stats.fields.1.name: "derived_object.float" } +- match: { aggregations.matrix_stats.fields.1.count: 5 } + +--- +"Test median absolute deviation aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + mad_long: + median_absolute_deviation: + field: derived_object.long +- match: { hits.total.value: 5 } +- match: { aggregations.mad_long.value: 2.0 } + +--- +"Test simple pipeline agg derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_object.keyword + aggs: + sum_derived_longs: + sum: + field: derived_object.long + sum_total: + sum_bucket: + buckets_path: "keywords>sum_derived_longs" +- match: { hits.total.value: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.sum_derived_longs.value: 2 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.sum_derived_longs.value: -3 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.sum_derived_longs.value: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.sum_derived_longs.value: 5 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.sum_derived_longs.value: 4 } +- match: { aggregations.sum_total.value: 9 } + + +--- +"Test composite agg on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + test_composite_agg: + composite: + size: 10 + sources: + - os: + terms: + field: derived_object.os + - keyword: + terms: + field: derived_object.keyword + - is_true: + terms: + field: derived_object.boolean + aggs: + avg_long: + avg: + field: derived_object.long +- length: { aggregations.test_composite_agg.buckets: 5 } +- match: { aggregations.test_composite_agg.buckets.0.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.0.key.keyword: "baz" } +- match: { aggregations.test_composite_agg.buckets.0.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.0.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.0.avg_long.value: -3.0 } +- match: { aggregations.test_composite_agg.buckets.1.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.1.key.keyword: "foo" } +- match: { aggregations.test_composite_agg.buckets.1.key.is_true: true } +- match: { 
aggregations.test_composite_agg.buckets.1.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.1.avg_long.value: 1.0 } +- match: { aggregations.test_composite_agg.buckets.2.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.2.key.keyword: "quux" } +- match: { aggregations.test_composite_agg.buckets.2.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.2.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.2.avg_long.value: 5.0 } +- match: { aggregations.test_composite_agg.buckets.3.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.3.key.keyword: "bar" } +- match: { aggregations.test_composite_agg.buckets.3.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.3.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.3.avg_long.value: 2.0 } +- match: { aggregations.test_composite_agg.buckets.4.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.4.key.keyword: "qux" } +- match: { aggregations.test_composite_agg.buckets.4.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.4.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.4.avg_long.value: 4.0 } + +--- +"Test auto date histogram on derived_object": +- do: + search: + rest_total_hits_as_int: true + index: test + body: + size: 0 + aggs: + test_auto_date_histogram: + auto_date_histogram: + field: "derived_object.date" + buckets: 10 + format: "yyyy-MM-dd" + aggs: + avg_long: + avg: + field: derived_object.long +- match: { hits.total: 5 } +- length: { aggregations.test_auto_date_histogram.buckets: 9 } +- match: { aggregations.test_auto_date_histogram.buckets.0.key_as_string: "2017-01-01"} +- match: { aggregations.test_auto_date_histogram.buckets.0.avg_long.value: 1.0} + +--- +"Test variable_width_histogram aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + var_width_hist: + variable_width_histogram: + field: derived_object.long + buckets: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.var_width_hist.buckets: 3 } + +--- +"Test extended_stats aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + extended_stats_agg: + extended_stats: + field: derived_object.long + +- match: { hits.total.value: 5 } +- match: { aggregations.extended_stats_agg.count: 5 } +- match: { aggregations.extended_stats_agg.min: -3 } +- match: { aggregations.extended_stats_agg.max: 5 } +- is_true: aggregations.extended_stats_agg.avg +- is_true: aggregations.extended_stats_agg.sum +- is_true: aggregations.extended_stats_agg.sum_of_squares +- is_true: aggregations.extended_stats_agg.variance +- is_true: aggregations.extended_stats_agg.std_deviation + +--- +"Test rare_terms aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + rare_terms_agg: + rare_terms: + field: derived_object.keyword + max_doc_count: 1 + +- match: { hits.total.value: 5 } +- length: { aggregations.rare_terms_agg.buckets: 5 } + +--- +"Test global aggregation on derived_object": +- do: + search: + index: test + body: + query: + term: + derived_object.keyword: "foo" + aggs: + all_docs: + global: {} + aggs: + avg_long: + avg: + field: derived_object.long + +- match: { hits.total.value: 1 } +- match: { aggregations.all_docs.doc_count: 5 } +- match: { aggregations.all_docs.avg_long.value: 1.8 } + +--- +"Test value_count aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + 
value_count_agg: + value_count: + field: derived_object.long + +- match: { hits.total.value: 5 } +- match: { aggregations.value_count_agg.value: 5 } + +--- +"Test multi_terms aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + multi_terms_agg: + multi_terms: + terms: + - field: derived_object.keyword + - field: derived_object.os + size: 10 + +- match: { hits.total.value: 5 } +- length: { aggregations.multi_terms_agg.buckets: 5 } + + +### IP specific tests +--- +"Test terms aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_terms: + terms: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_terms.buckets: 5 } +- match: { aggregations.ip_terms.buckets.0.key: "10.0.0.1" } +- match: { aggregations.ip_terms.buckets.0.doc_count: 1 } + +--- +"Test range aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_ranges: + ip_range: + field: derived_object.ip + ranges: + - to: "10.0.0.0" + - from: "10.0.0.0" + to: "172.16.0.0" + - from: "172.16.0.0" + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_ranges.buckets: 3 } +- match: { aggregations.ip_ranges.buckets.0.doc_count: 0 } +- match: { aggregations.ip_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.ip_ranges.buckets.2.doc_count: 3 } + +--- +"Test cardinality aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_ips: + cardinality: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- match: { aggregations.unique_ips.value: 5 } + +--- +"Test missing aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + missing_ips: + missing: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- match: { aggregations.missing_ips.doc_count: 0 } + +--- +"Test value count aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_count: + value_count: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- match: { aggregations.ip_count.value: 5 } + +### TEST UNSUPPORTED AGG TYPES +--- +"Test sig terms not supported": +- do: + catch: /illegal_argument_exception/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + terms: + derived_keyword: ["foo"] + aggs: + significant_os: + significant_terms: + field: "derived_os" + min_doc_count: 1 + size: 10 + +--- +"Test significant text": +- do: + catch: /illegal_argument_exception/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + terms: + derived_keyword: ["foo"] + aggs: + significant_words: + significant_text: + field: "derived_text" + size: 10 + min_doc_count: 1 + +--- +"Test scripted_metric aggregation": +- do: + catch: /A document doesn't have a value for a field/ + search: + index: test + body: + size: 0 + aggs: + scripted_metric_agg: + scripted_metric: + init_script: "state.arr = []" + map_script: "state.arr.add(doc.derived_long.value)" + combine_script: "return 0" + reduce_script: "return 0" + +--- +"Test geo_distance aggregation on derived_geo": +- do: + catch: /aggregation_execution_exception/ + search: + index: test + rest_total_hits_as_int: true + body: + size: 0 + aggs: + distance: + geo_distance: + field: derived_geo + origin: "35.7796, -78.6382" + ranges: + - to: 1000000 + - from: 1000000 + to: 5000000 + - from: 5000000 diff --git 
a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java index 2a2de9debb9d9..488b9e632aa2a 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java @@ -97,6 +97,8 @@ public Map<String, Processor.Factory<SearchResponseProcessor>> getResponseProces new TruncateHitsResponseProcessor.Factory(), CollapseResponseProcessor.TYPE, new CollapseResponseProcessor.Factory(), + SplitResponseProcessor.TYPE, + new SplitResponseProcessor.Factory(), SortResponseProcessor.TYPE, new SortResponseProcessor.Factory() ) diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java index 404842742629c..e10f06da29ba0 100644 --- a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java @@ -47,6 +47,7 @@ public void testResponseProcessorAllowlist() throws IOException { List.of("rename_field", "truncate_hits", "collapse"), SearchPipelineCommonModulePlugin::getResponseProcessors ); + runAllowlistTest(key, List.of("split", "sort"), SearchPipelineCommonModulePlugin::getResponseProcessors); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -82,7 +83,7 @@ public void testAllowlistNotSpecified() throws IOException { try (SearchPipelineCommonModulePlugin plugin = new SearchPipelineCommonModulePlugin()) { assertEquals(Set.of("oversample", "filter_query", "script"), plugin.getRequestProcessors(createParameters(settings)).keySet()); assertEquals( - Set.of("rename_field", "truncate_hits", "collapse", "sort"), + Set.of("rename_field", "truncate_hits", "collapse", "split", "sort"), plugin.getResponseProcessors(createParameters(settings)).keySet() ); assertEquals(Set.of(), plugin.getSearchPhaseResultsProcessors(createParameters(settings)).keySet()); diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 deleted file mode 100644 index 6784ac6c3b64f..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5c26883046fed --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +bdc12df04bb6858890b8aa108060b5b365a26102 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 deleted file mode 100644 index 3d86194de9213..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6762ec00a6d268f9980741f5b755838bcd658bf \
No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1fd224fdd0b44 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +c87f2ec3d9a97bd2b793d16817abb2bab93a7fc3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 deleted file mode 100644 index 4ef1adb818300..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..22d35128c3ad5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +81af1040bfa977f98dd0e1bd9639513ea862ca04 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 deleted file mode 100644 index 06c86b8fda557..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..d4767d06b22bf --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +7fa28b510f0f16f4d5d7188b86bef59e048f62f9 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 16cb1cce7f504..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..47af3100f0f2d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b2798069092a981a832b7510d0462ee9efb7a80e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 deleted file mode 100644 index 2f70f791f65ed..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8b30272861770 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +3d5e2d5bcc6baeeb8c13a230980c6132a778e036 \ No newline at 
end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 deleted file mode 100644 index 621cbf58f3133..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1a094fa19a623 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +58a631d9d44c4ed7cc0dcc9cffa6641da9374d72 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 deleted file mode 100644 index ac96e7545ed58..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24e97cf14ea9d80afe4c5ab69066b587fccc154a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5fbfde0836e0c --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +77cd136dd3843f5e7cbcf68c824975d745c49ddb \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 0847ac3034db7..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -acafc128cddafa021bc0b48b0788eb0e118add5e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8dad0e3104dc8 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b50ff619cdcdc48e748cba3405c9988529f28f60 \ No newline at end of file diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index d631855013527..81ac52b97cefa 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -66,7 +66,7 @@ dependencies { runtimeOnly "com.optimaize.languagedetector:language-detector:0.6" runtimeOnly "com.google.guava:guava:${versions.guava}" // Other dependencies - api 'org.tukaani:xz:1.9' + api 'org.tukaani:xz:1.10' api "commons-io:commons-io:${versions.commonsio}" api "org.slf4j:slf4j-api:${versions.slf4j}" diff --git a/plugins/ingest-attachment/licenses/xz-1.10.jar.sha1 b/plugins/ingest-attachment/licenses/xz-1.10.jar.sha1 new file mode 100644 index 0000000000000..e3757c19ce5ab --- /dev/null +++ b/plugins/ingest-attachment/licenses/xz-1.10.jar.sha1 @@ -0,0 +1 @@ +1be8166f89e035a56c6bfc67dbc423996fe577e2 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xz-1.9.jar.sha1 b/plugins/ingest-attachment/licenses/xz-1.9.jar.sha1 deleted file mode 100644 index c3e22d167212f..0000000000000 
--- a/plugins/ingest-attachment/licenses/xz-1.9.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1ea4bec1a921180164852c65006d928617bd2caf \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 7bd7be1481a2f..e76556f24cc23 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,11 +44,11 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.49.1' + api 'com.azure:azure-core:1.51.0' api 'com.azure:azure-json:1.1.0' - api 'com.azure:azure-xml:1.0.0' + api 'com.azure:azure-xml:1.1.0' api 'com.azure:azure-storage-common:12.25.1' - api 'com.azure:azure-core-http-netty:1.15.1' + api 'com.azure:azure-core-http-netty:1.15.3' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" @@ -61,7 +61,7 @@ dependencies { // Start of transitive dependencies for azure-identity api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' api "net.java.dev.jna:jna-platform:${versions.jna}" - api 'com.microsoft.azure:msal4j:1.16.1' + api 'com.microsoft.azure:msal4j:1.17.0' api 'com.nimbusds:oauth2-oidc-sdk:11.9.1' api 'com.nimbusds:nimbus-jose-jwt:9.40' api 'com.nimbusds:content-type:2.3' diff --git a/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 deleted file mode 100644 index d487c08c26e94..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a7c44282eaa0f5a3be4b920d6a057509adfe8674 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 new file mode 100644 index 0000000000000..7200f59af2f9a --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 @@ -0,0 +1 @@ +ff5d0aedf75ca45ec0ace24673f790d2f7a57096 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1 deleted file mode 100644 index 3a0747a0daacb..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -036f7466a521aa99c79a491a9cf20444667df78b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.3.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.3.jar.sha1 new file mode 100644 index 0000000000000..3cea52ba67ce5 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.3.jar.sha1 @@ -0,0 +1 @@ +03b5bd5f5c16eea71f130119dbfb1fe5239f806a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1 b/plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1 deleted file mode 100644 index 798ec5d95c6ac..0000000000000 --- a/plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba584703bd47e9e789343ee3332f0f5a64f7f187 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-xml-1.1.0.jar.sha1 b/plugins/repository-azure/licenses/azure-xml-1.1.0.jar.sha1 new file mode 100644 index 0000000000000..4f9cfcac02f6e --- /dev/null +++ b/plugins/repository-azure/licenses/azure-xml-1.1.0.jar.sha1 @@ -0,0 +1 @@ +8218a00c07f9f66d5dc7ae2ba613da6890867497 \ No newline at end of file diff --git 
a/plugins/repository-azure/licenses/msal4j-1.16.1.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.16.1.jar.sha1 deleted file mode 100644 index 7d24922196be4..0000000000000 --- a/plugins/repository-azure/licenses/msal4j-1.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4ad89b4632ef9abab883114e77c079843a206862 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/msal4j-1.17.0.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.17.0.jar.sha1 new file mode 100644 index 0000000000000..34101c989eecd --- /dev/null +++ b/plugins/repository-azure/licenses/msal4j-1.17.0.jar.sha1 @@ -0,0 +1 @@ +7d37157da92b719f250b0023234ac9dda922a2a5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 deleted file mode 100644 index 5e3f819012811..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f988dbb527efb0e7cf7d444cc50b0fc3f5f380ec \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..a42a41b6387c8 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +06724b184ee870ecc4d8fc36931beeb3c387b0ee \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 deleted file mode 100644 index 06c86b8fda557..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..d4767d06b22bf --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +7fa28b510f0f16f4d5d7188b86bef59e048f62f9 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1 deleted file mode 100644 index 226ee06d39d6c..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea52ef6617a9b69b0baaebb7f0b80373527f9607 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5291a16c10448 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +9aed7e78c467d06a47a45b5b27466380a6427e2f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 deleted file mode 100644 index dcc2b0c7ca923..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1e459c8630bb7c942b79a97e62dd728798de6a8c \ No newline at end of file diff --git 
a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..cf50574b87da0 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b23c87a85451b3b0e7c3e8e89698cea6831a8418 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 deleted file mode 100644 index b22ad6784809b..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ac6a3d96935129ba45ea768ad30e31cad0d8c4d \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..24e8177190e04 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +375872f1c16bb51aac016ff6ee4f5d28b1288d4d \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 0847ac3034db7..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -acafc128cddafa021bc0b48b0788eb0e118add5e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8dad0e3104dc8 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b50ff619cdcdc48e748cba3405c9988529f28f60 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.21.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.21.jar.sha1 deleted file mode 100644 index 21c16c7430016..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.21.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -acb98bd08107287c454ce74e7b1ed8e7a018a662 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.22.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.22.jar.sha1 new file mode 100644 index 0000000000000..cc894568c5760 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.22.jar.sha1 @@ -0,0 +1 @@ +08356b59b29f86e7142c9daca0434653a64ae64b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.21.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.21.jar.sha1 deleted file mode 100644 index 648df22873d56..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.21.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b83542bb35630ef815b4177e3c670f62e952e695 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.22.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.22.jar.sha1 new file mode 100644 index 0000000000000..2402813f831ce --- /dev/null +++ 
b/plugins/repository-azure/licenses/reactor-netty-http-1.1.22.jar.sha1 @@ -0,0 +1 @@ +2faf64b3822b0512f15d72a325e2826eb8564413 \ No newline at end of file diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java index 71ffd0fd959f1..970388498ee26 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -157,7 +157,7 @@ private BlobContainer createBlobContainer(final int maxRetries) { + "/"; clientSettings.put(ENDPOINT_SUFFIX_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint); clientSettings.put(MAX_RETRIES_SETTING.getConcreteSettingForNamespace(clientName).getKey(), maxRetries); - clientSettings.put(TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), TimeValue.timeValueMillis(2000)); + clientSettings.put(TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), TimeValue.timeValueMillis(5000)); final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString(ACCOUNT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), "account"); @@ -171,7 +171,7 @@ RequestRetryOptions createRetryPolicy(final AzureStorageSettings azureStorageSet return new RequestRetryOptions( RetryPolicyType.EXPONENTIAL, azureStorageSettings.getMaxRetries(), - 1, + 5, 10L, 100L, secondaryHost diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 884fb1333404a..eeb5e2cd88317 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -66,17 +66,17 @@ dependencies { } api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api 'org.apache.avro:avro:1.11.3' + api 'org.apache.avro:avro:1.12.0' api 'com.google.code.gson:gson:2.11.0' runtimeOnly "com.google.guava:guava:${versions.guava}" api "commons-logging:commons-logging:${versions.commonslogging}" - api 'commons-cli:commons-cli:1.8.0' + api 'commons-cli:commons-cli:1.9.0' api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' api "org.apache.commons:commons-compress:${versions.commonscompress}" api 'org.apache.commons:commons-configuration2:2.11.0' api "commons-io:commons-io:${versions.commonsio}" - api 'org.apache.commons:commons-lang3:3.15.0' + api 'org.apache.commons:commons-lang3:3.16.0' implementation 'com.google.re2j:re2j:1.7' api 'javax.servlet:servlet-api:2.5' api "org.slf4j:slf4j-api:${versions.slf4j}" @@ -425,19 +425,6 @@ thirdPartyAudit { 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', 'org.apache.hadoop.shaded.org.xbill.DNS.spi.DNSJavaNameServiceDescriptor', - - 'org.apache.avro.reflect.FieldAccessUnsafe', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeBooleanField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeByteField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCachedField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCharField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCustomEncodedField', - 
'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeDoubleField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeFloatField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeIntField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeLongField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeObjectField', - 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeShortField', ) } diff --git a/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 deleted file mode 100644 index fb43ecbcf22c9..0000000000000 --- a/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02b463409b373bff9ece09f54a43d42da5cea55a \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/avro-1.12.0.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.12.0.jar.sha1 new file mode 100644 index 0000000000000..83f7bb3677159 --- /dev/null +++ b/plugins/repository-hdfs/licenses/avro-1.12.0.jar.sha1 @@ -0,0 +1 @@ +6e692a464b213f6df49f8e3e7fcf42df0dbb7639 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1 deleted file mode 100644 index 65102052409ea..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -41a4bff12057eecb6daaf9c7f36c237815be3da1 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.9.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.9.0.jar.sha1 new file mode 100644 index 0000000000000..9a97a11dbe8d5 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-cli-1.9.0.jar.sha1 @@ -0,0 +1 @@ +e1cdfa8bf40ccbb7440b2d1232f9f45bb20a1844 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.15.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.15.0.jar.sha1 deleted file mode 100644 index 4b1179c935946..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-lang3-3.15.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21581109b4be710ea4b195d5760392ec284f9f11 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.16.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.16.0.jar.sha1 new file mode 100644 index 0000000000000..ef4f1c1fc2002 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-lang3-3.16.0.jar.sha1 @@ -0,0 +1 @@ +3eb54effe40946dfb06dc5cd6c7ce4116cd51ea4 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 deleted file mode 100644 index 076124a7d1f89..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8fba10bb4911517eb1bdcc05ef392499dda4d5ac \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.112.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..7c36b789e839c --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +d6b2e543749a86957777a46cf68aaa337cc558cb \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 deleted file mode 100644 index 6784ac6c3b64f..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 +++ /dev/null 
@@ -1 +0,0 @@ -b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5c26883046fed --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +bdc12df04bb6858890b8aa108060b5b365a26102 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 deleted file mode 100644 index 3d86194de9213..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6762ec00a6d268f9980741f5b755838bcd658bf \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1fd224fdd0b44 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +c87f2ec3d9a97bd2b793d16817abb2bab93a7fc3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1 deleted file mode 100644 index 4ef1adb818300..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..22d35128c3ad5 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +81af1040bfa977f98dd0e1bd9639513ea862ca04 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 deleted file mode 100644 index 06c86b8fda557..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..d4767d06b22bf --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +7fa28b510f0f16f4d5d7188b86bef59e048f62f9 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 16cb1cce7f504..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..47af3100f0f2d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b2798069092a981a832b7510d0462ee9efb7a80e \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 deleted file mode 100644 index 2f70f791f65ed..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8b30272861770 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +3d5e2d5bcc6baeeb8c13a230980c6132a778e036 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1 deleted file mode 100644 index 621cbf58f3133..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1a094fa19a623 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +58a631d9d44c4ed7cc0dcc9cffa6641da9374d72 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1 deleted file mode 100644 index ac96e7545ed58..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24e97cf14ea9d80afe4c5ab69066b587fccc154a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5fbfde0836e0c --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +77cd136dd3843f5e7cbcf68c824975d745c49ddb \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1 deleted file mode 100644 index 97001777eadf5..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b97d32eb1489043e478deea99bd93ce487b82f6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..0196dacfe92ba --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +67e590356eb53c20aaabd67f61ae66f628e62e3d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 0847ac3034db7..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-acafc128cddafa021bc0b48b0788eb0e118add5e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8dad0e3104dc8 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b50ff619cdcdc48e748cba3405c9988529f28f60 \ No newline at end of file diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 66d172e3dc7f3..872d928aa093f 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -86,7 +86,9 @@ thirdPartyAudit { 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', 'kotlin.io.path.PathsKt', 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', - 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener' + 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener', + 'io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider', + 'io.opentelemetry.sdk.autoconfigure.spi.internal.StructuredConfigProperties' ) } diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 deleted file mode 100644 index 04ec81edf969c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6db562f2b74ffaa7253d740e9aa7a3c4f2e392ec \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..ead8fb235fa12 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 @@ -0,0 +1 @@ +ec5ad3b420c9fba4b340e85a3199fd0f2accd023 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 deleted file mode 100644 index bcd7c886b5f6c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -43115633361430a3c6aaa39fd78363014ac79270 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..b601a4fb5246f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 @@ -0,0 +1 @@ +fd387313cc37a6e93062e9a80a2526634d22cb19 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 deleted file mode 100644 index 9716ec518c886..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf1db0f288b9baaabdb439ab6179b673b751511e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..74b7cb25cdfe5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 @@ -0,0 +1 @@ 
+3d7cf15ef425053e24e825160ca7b4ac08d721aa \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 deleted file mode 100644 index c0e79b05aa675..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b883b179c242a1761df2d408fe01ec41b17327a3 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..d8d8f75850cb6 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +cf92f4c1b60c2359c12f6f323f6a2a623c333910 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 deleted file mode 100644 index 1df0ad183c475..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a8c1f9b05ac9fb1259517cf53950ccecaf84ebe1 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..3e1212943f894 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 @@ -0,0 +1 @@ +8dee21440b811004ecc1c36c1cd44f9d3494546c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 deleted file mode 100644 index ebeb639a8459c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d8b92bcdb0ace48fb5764cc1ad7a0de197d5b8c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..21a29cc8445e5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 @@ -0,0 +1 @@ +d86e60b6d49e389ebe5797d42a7288a20d30c162 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 deleted file mode 100644 index b630c808d4763..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -80fa10130cc7e7626e2581aa7c5871eab7381889 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..ae522ac698aa8 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +aeba3075b8dfd97779edadc0a3711d999bb0e396 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 
b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 deleted file mode 100644 index eda90dc825e6f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -006dcdbf8eb911ad4d11c54fa824f5a97f582850 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..a741d0a167d60 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 @@ -0,0 +1 @@ +368d7905d6a0a313c63e3a91f895a3a08500519e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 deleted file mode 100644 index cdd7dc6551b33..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -59f260c5412b79a5a40c7d433600248727cd195a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..972e7de1c74be --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 @@ -0,0 +1 @@ +c740e8f7d0d914d6acd310ac53901bb8753c6e8d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 deleted file mode 100644 index 668291498bbae..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7042214012232a5d6a251aca4aa5932014a4946b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..c56ca0b9e8169 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +b820861f85ba83db0ad896c47f723208d7473d5a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 deleted file mode 100644 index 74f0786e21954..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1c6b884d65f79d40429263ac0ab7ed1422237837 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..39db6cb73727f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 @@ -0,0 +1 @@ +f88ee292f5605c87dfe85c8d90131bce9f0b3b8e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 deleted file mode 100644 index 23ef1bf6e6b2c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a1c9b33a8660ace82aecb7f1c7ea50093dc87f0a \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..6dcd496e033d3 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 @@ -0,0 +1 @@ +9d1200befb28e3e9f61073ac3de23cc55e509dc7 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 deleted file mode 100644 index aea753f0df18b..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5145f077bf2821ad243617baf8c1810d29af8566 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..161e400f87077 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 @@ -0,0 +1 @@ +d9bbc2e2e800317d72fbf3141ae8391e95fa6229 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 deleted file mode 100644 index 7124dcb31da3f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -955de1d2de4d3d2bb6ba2498f19c9a06da2f3956 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..e986b4b53388e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 @@ -0,0 +1 @@ +906d916bee46f60260c09314284b5948c54a0662 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 deleted file mode 100644 index 6784ac6c3b64f..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5c26883046fed --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +bdc12df04bb6858890b8aa108060b5b365a26102 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 deleted file mode 100644 index 3d86194de9213..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6762ec00a6d268f9980741f5b755838bcd658bf \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1fd224fdd0b44 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +c87f2ec3d9a97bd2b793d16817abb2bab93a7fc3 \ No newline at end of file diff --git 
a/plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1 deleted file mode 100644 index 4ef1adb818300..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..22d35128c3ad5 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +81af1040bfa977f98dd0e1bd9639513ea862ca04 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 16cb1cce7f504..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..47af3100f0f2d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b2798069092a981a832b7510d0462ee9efb7a80e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 deleted file mode 100644 index 2f70f791f65ed..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8b30272861770 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +3d5e2d5bcc6baeeb8c13a230980c6132a778e036 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1 deleted file mode 100644 index 621cbf58f3133..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1a094fa19a623 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +58a631d9d44c4ed7cc0dcc9cffa6641da9374d72 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1 deleted file mode 100644 index ac96e7545ed58..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24e97cf14ea9d80afe4c5ab69066b587fccc154a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.112.Final.jar.sha1 
b/plugins/transport-nio/licenses/netty-transport-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5fbfde0836e0c --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +77cd136dd3843f5e7cbcf68c824975d745c49ddb \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/build.gradle b/plugins/transport-reactor-netty4/build.gradle index 1a94def3fdff1..089e57f062a9f 100644 --- a/plugins/transport-reactor-netty4/build.gradle +++ b/plugins/transport-reactor-netty4/build.gradle @@ -46,7 +46,7 @@ dependencies { api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}" testImplementation "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" - testImplementation "io.projectreactor:reactor-test:${versions.reactor}" + javaRestTestImplementation "io.projectreactor:reactor-test:${versions.reactor}" testImplementation project(":modules:transport-netty4") } @@ -80,6 +80,10 @@ javaRestTest { systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' } +testClusters.javaRestTest { + setting 'http.type', 'reactor-netty4' +} + thirdPartyAudit { ignoreMissingClasses( 'com.aayushatharva.brotli4j.Brotli4jLoader', diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 deleted file mode 100644 index 6784ac6c3b64f..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5c26883046fed --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +bdc12df04bb6858890b8aa108060b5b365a26102 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 deleted file mode 100644 index 3d86194de9213..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6762ec00a6d268f9980741f5b755838bcd658bf \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1fd224fdd0b44 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +c87f2ec3d9a97bd2b793d16817abb2bab93a7fc3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 deleted file mode 100644 index 5e3f819012811..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f988dbb527efb0e7cf7d444cc50b0fc3f5f380ec \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..a42a41b6387c8 --- /dev/null +++ 
b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +06724b184ee870ecc4d8fc36931beeb3c387b0ee \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 deleted file mode 100644 index 4ef1adb818300..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..22d35128c3ad5 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +81af1040bfa977f98dd0e1bd9639513ea862ca04 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 deleted file mode 100644 index 06c86b8fda557..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..d4767d06b22bf --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +7fa28b510f0f16f4d5d7188b86bef59e048f62f9 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 16cb1cce7f504..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..47af3100f0f2d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b2798069092a981a832b7510d0462ee9efb7a80e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 deleted file mode 100644 index 2f70f791f65ed..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8b30272861770 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +3d5e2d5bcc6baeeb8c13a230980c6132a778e036 \ No newline at end of file diff --git 
a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 deleted file mode 100644 index 621cbf58f3133..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1a094fa19a623 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +58a631d9d44c4ed7cc0dcc9cffa6641da9374d72 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 deleted file mode 100644 index b22ad6784809b..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ac6a3d96935129ba45ea768ad30e31cad0d8c4d \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..24e8177190e04 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +375872f1c16bb51aac016ff6ee4f5d28b1288d4d \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 deleted file mode 100644 index ac96e7545ed58..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24e97cf14ea9d80afe4c5ab69066b587fccc154a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5fbfde0836e0c --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +77cd136dd3843f5e7cbcf68c824975d745c49ddb \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 0847ac3034db7..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -acafc128cddafa021bc0b48b0788eb0e118add5e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8dad0e3104dc8 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b50ff619cdcdc48e748cba3405c9988529f28f60 \ No newline at end of file diff --git 
a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.21.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.21.jar.sha1 deleted file mode 100644 index 21c16c7430016..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.21.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -acb98bd08107287c454ce74e7b1ed8e7a018a662 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.22.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.22.jar.sha1 new file mode 100644 index 0000000000000..cc894568c5760 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.22.jar.sha1 @@ -0,0 +1 @@ +08356b59b29f86e7142c9daca0434653a64ae64b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.21.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.21.jar.sha1 deleted file mode 100644 index 648df22873d56..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.21.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b83542bb35630ef815b4177e3c670f62e952e695 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.22.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.22.jar.sha1 new file mode 100644 index 0000000000000..2402813f831ce --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.22.jar.sha1 @@ -0,0 +1 @@ +2faf64b3822b0512f15d72a325e2826eb8564413 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4BadRequestIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4BadRequestIT.java new file mode 100644 index 0000000000000..62834483b5e9b --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4BadRequestIT.java @@ -0,0 +1,115 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.rest; + +import org.opensearch.client.Request; +import org.opensearch.client.RequestOptions; +import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.opensearch.test.rest.yaml.ObjectPath; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.Map; + +import static org.opensearch.core.rest.RestStatus.REQUEST_URI_TOO_LONG; +import static org.hamcrest.Matchers.equalTo; + +public class ReactorNetty4BadRequestIT extends OpenSearchRestTestCase { + + public void testBadRequest() throws IOException { + final Response response = client().performRequest(new Request("GET", "/_nodes/settings")); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map<String, Object> map = objectPath.evaluate("nodes"); + int maxMaxInitialLineLength = Integer.MIN_VALUE; + final Setting<ByteSizeValue> httpMaxInitialLineLength = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; + final String key = httpMaxInitialLineLength.getKey().substring("http.".length()); + for (Map.Entry<String, Object> entry : map.entrySet()) { + @SuppressWarnings("unchecked") + final Map<String, Object> settings = (Map<String, Object>) ((Map<String, Object>) entry.getValue()).get("settings"); + final int maxInitialLineLength; + if (settings.containsKey("http")) { + @SuppressWarnings("unchecked") + final Map<String, Object> httpSettings = (Map<String, Object>) settings.get("http"); + if (httpSettings.containsKey(key)) { + maxInitialLineLength = ByteSizeValue.parseBytesSizeValue((String) httpSettings.get(key), key).bytesAsInt(); + } else { + maxInitialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt(); + } + } else { + maxInitialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt(); + } + maxMaxInitialLineLength = Math.max(maxMaxInitialLineLength, maxInitialLineLength); + } + + final String path = "/" + new String(new byte[maxMaxInitialLineLength], Charset.forName("UTF-8")).replace('\0', 'a'); + final ResponseException e = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request(randomFrom("GET", "POST", "PUT"), path)) + ); + // The reactor-netty implementation does not provide a hook to customize or intercept request decoder errors at the moment (see + // please https://github.com/reactor/reactor-netty/issues/3327).
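+ // Until such a hook exists, the test can only assert on the status code of the resulting response.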
+ assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(REQUEST_URI_TOO_LONG.getStatus())); + } + + public void testInvalidParameterValue() throws IOException { + final Request request = new Request("GET", "/_cluster/settings"); + request.addParameter("pretty", "neither-true-nor-false"); + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + final Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(400)); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map<String, Object> map = objectPath.evaluate("error"); + assertThat(map.get("type"), equalTo("illegal_argument_exception")); + assertThat(map.get("reason"), equalTo("Failed to parse value [neither-true-nor-false] as only [true] or [false] are allowed.")); + } + + public void testInvalidHeaderValue() throws IOException { + final Request request = new Request("GET", "/_cluster/settings"); + final RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("Content-Type", "\t"); + request.setOptions(options); + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + final Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(400)); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map<String, Object> map = objectPath.evaluate("error"); + assertThat(map.get("type"), equalTo("content_type_header_exception")); + assertThat(map.get("reason"), equalTo("java.lang.IllegalArgumentException: invalid Content-Type header []")); + } +} diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4HeadBodyIsEmptyIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4HeadBodyIsEmptyIT.java new file mode 100644 index 0000000000000..663eb9ef6e946 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4HeadBodyIsEmptyIT.java @@ -0,0 +1,204 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details.
+ */ + +package org.opensearch.rest; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.hamcrest.Matcher; + +import java.io.IOException; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.core.rest.RestStatus.NOT_FOUND; +import static org.opensearch.core.rest.RestStatus.OK; +import static org.hamcrest.Matchers.greaterThan; + +public class ReactorNetty4HeadBodyIsEmptyIT extends OpenSearchRestTestCase { + public void testHeadRoot() throws IOException { + headTestCase("/", emptyMap(), greaterThan(0)); + headTestCase("/", singletonMap("pretty", ""), greaterThan(0)); + headTestCase("/", singletonMap("pretty", "true"), greaterThan(0)); + } + + private void createTestDoc() throws IOException { + createTestDoc("test"); + } + + private void createTestDoc(final String indexName) throws IOException { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.field("test", "test"); + } + builder.endObject(); + Request request = new Request("PUT", "/" + indexName + "/_doc/" + "1"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + } + } + + public void testDocumentExists() throws IOException { + createTestDoc(); + headTestCase("/test/_doc/1", emptyMap(), greaterThan(0)); + headTestCase("/test/_doc/1", singletonMap("pretty", "true"), greaterThan(0)); + headTestCase("/test/_doc/2", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + + public void testIndexExists() throws IOException { + createTestDoc(); + headTestCase("/test", emptyMap(), greaterThan(0)); + headTestCase("/test", singletonMap("pretty", "true"), greaterThan(0)); + } + + public void testAliasExists() throws IOException { + createTestDoc(); + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startArray("actions"); + { + builder.startObject(); + { + builder.startObject("add"); + { + builder.field("index", "test"); + builder.field("alias", "test_alias"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + + Request request = new Request("POST", "/_aliases"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + headTestCase("/_alias/test_alias", emptyMap(), greaterThan(0)); + headTestCase("/test/_alias/test_alias", emptyMap(), greaterThan(0)); + } + } + + public void testAliasDoesNotExist() throws IOException { + createTestDoc(); + headTestCase("/_alias/test_alias", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + headTestCase("/test/_alias/test_alias", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + + public void testTemplateExists() throws IOException { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.array("index_patterns", "*"); + builder.startObject("settings"); + { + builder.field("number_of_replicas", 0); + } + builder.endObject(); + } + builder.endObject(); + + Request request = new Request("PUT", "/_template/template"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + headTestCase("/_template/template", emptyMap(), greaterThan(0)); + } + } + + public void testGetSourceAction() throws IOException { + createTestDoc(); + 
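// A HEAD on the _source endpoint should report the source length via Content-Length while returning no body. +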
headTestCase("/test/_source/1", emptyMap(), greaterThan(0)); + headTestCase("/test/_source/2", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startObject("mappings"); + { + builder.startObject("_source"); + { + builder.field("enabled", false); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + + Request request = new Request("PUT", "/test-no-source"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + createTestDoc("test-no-source"); + headTestCase("/test-no-source/_source/1", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + } + + public void testException() throws IOException { + /* + * This will throw an index not found exception which will be sent on the channel; previously when handling HEAD requests that would + * throw an exception, the content was swallowed and a content length header of zero was returned. Instead of swallowing the content + * we now let it rise up to the upstream channel so that it can compute the content length that would be returned. This test case is + * a test for this situation. + */ + headTestCase("/index-not-found-exception", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + + private void headTestCase(final String url, final Map params, final Matcher matcher) throws IOException { + headTestCase(url, params, OK.getStatus(), matcher); + } + + private void headTestCase( + final String url, + final Map params, + final int expectedStatusCode, + final Matcher matcher, + final String... expectedWarnings + ) throws IOException { + Request request = new Request("HEAD", url); + for (Map.Entry param : params.entrySet()) { + request.addParameter(param.getKey(), param.getValue()); + } + request.setOptions(expectWarnings(expectedWarnings)); + Response response = client().performRequest(request); + assertEquals(expectedStatusCode, response.getStatusLine().getStatusCode()); + assertThat(Integer.valueOf(response.getHeader("Content-Length")), matcher); + assertNull("HEAD requests shouldn't have a response body but " + url + " did", response.getEntity()); + } + +} diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java new file mode 100644 index 0000000000000..c564e289e3f88 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; +import org.opensearch.client.StreamingRequest; +import org.opensearch.client.StreamingResponse; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.opensearch.test.rest.yaml.ObjectPath; +import org.junit.After; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import reactor.core.publisher.Flux; +import reactor.test.StepVerifier; +import reactor.test.scheduler.VirtualTimeScheduler; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.collection.IsEmptyCollection.empty; + +public class ReactorNetty4StreamingIT extends OpenSearchRestTestCase { + @After + @Override + public void tearDown() throws Exception { + final Request request = new Request("DELETE", "/test-streaming"); + request.addParameter("ignore_unavailable", "true"); + + final Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + + super.tearDown(); + } + + public void testStreamingRequest() throws IOException { + final VirtualTimeScheduler scheduler = VirtualTimeScheduler.create(true); + + final Stream<String> stream = IntStream.range(1, 6) + .mapToObj(id -> "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"" + id + "\" } }\n" + "{ \"name\": \"josh\" }\n"); + + final Duration delay = Duration.ofMillis(1); + final StreamingRequest<ByteBuffer> streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).delayElements(delay, scheduler).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + streamingRequest.addParameter("refresh", "true"); + + final StreamingResponse<ByteBuffer> streamingResponse = client().streamRequest(streamingRequest); + scheduler.advanceTimeBy(delay); /* emit first element */ + + StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"1\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"2\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"3\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"4\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"5\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectComplete() + .verify(); + + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(streamingResponse.getWarnings(), empty()); + + final Request request = new Request("GET", "/test-streaming/_count"); + final Response response = client().performRequest(request); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Integer count = objectPath.evaluate("count"); + assertThat(count, equalTo(5)); + } + + public void testStreamingBadRequest() throws IOException { + final Stream<String> stream = Stream.of( + "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"1\" } }\n" + "{ \"name\": \"josh\" }\n" + ); + + final
StreamingRequest<ByteBuffer> streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + streamingRequest.addParameter("refresh", "not-supported-policy"); + + final StreamingResponse<ByteBuffer> streamingResponse = client().streamRequest(streamingRequest); + StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectErrorMatches( + ex -> ex instanceof ResponseException && ((ResponseException) ex).getResponse().getStatusLine().getStatusCode() == 400 + ) + .verify(Duration.ofSeconds(10)); + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(400)); + assertThat(streamingResponse.getWarnings(), empty()); + } + + public void testStreamingBadStream() throws IOException { + final VirtualTimeScheduler scheduler = VirtualTimeScheduler.create(true); + + final Stream<String> stream = Stream.of( + "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"1\" } }\n" + "{ \"name\": \"josh\" }\n", + "{ \"name\": \"josh\" }\n" + ); + + final Duration delay = Duration.ofMillis(1); + final StreamingRequest<ByteBuffer> streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).delayElements(delay, scheduler).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + + final StreamingResponse<ByteBuffer> streamingResponse = client().streamRequest(streamingRequest); + scheduler.advanceTimeBy(delay); /* emit first element */ + + StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"1\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"type\":\"illegal_argument_exception\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectComplete() + .verify(); + + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(streamingResponse.getWarnings(), empty()); + } +} diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java new file mode 100644 index 0000000000000..a978af1b11db4 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java @@ -0,0 +1,95 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.rest; + +import org.apache.hc.core5.http.ConnectionClosedException; +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.client.StreamingRequest; +import org.opensearch.client.StreamingResponse; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.junit.After; + +import java.io.InterruptedIOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Stream; + +import reactor.core.publisher.Flux; +import reactor.test.subscriber.TestSubscriber; + +import static org.hamcrest.CoreMatchers.anyOf; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.collection.IsEmptyCollection.empty; + +public class ReactorNetty4StreamingStressIT extends OpenSearchRestTestCase { + @After + @Override + public void tearDown() throws Exception { + final Request request = new Request("DELETE", "/test-stress-streaming"); + request.addParameter("ignore_unavailable", "true"); + + final Response response = adminClient().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + + super.tearDown(); + } + + public void testCloseClientStreamingRequest() throws Exception { + final AtomicInteger id = new AtomicInteger(0); + final Stream<String> stream = Stream.generate( + () -> "{ \"index\": { \"_index\": \"test-stress-streaming\", \"_id\": \"" + + id.incrementAndGet() + + "\" } }\n" + + "{ \"name\": \"josh\" }\n" + ); + + final StreamingRequest<ByteBuffer> streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).delayElements(Duration.ofMillis(500)).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + streamingRequest.addParameter("refresh", "true"); + + final StreamingResponse<ByteBuffer> streamingResponse = client().streamRequest(streamingRequest); + TestSubscriber<ByteBuffer> subscriber = TestSubscriber.create(); + streamingResponse.getBody().subscribe(subscriber); + + final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + try { + // Wait for the subscriber to receive at least one chunk + assertBusy(() -> assertThat(subscriber.getReceivedOnNext(), not(empty()))); + + // Close client forcibly + executor.schedule(() -> { + client().close(); + return null; + }, 2, TimeUnit.SECONDS); + + // Wait for the subscriber to terminate + subscriber.block(Duration.ofSeconds(10)); + assertThat( + subscriber.expectTerminalError(), + anyOf(instanceOf(InterruptedIOException.class), instanceOf(ConnectionClosedException.class)) + ); + } finally { + executor.shutdown(); + if (executor.awaitTermination(1, TimeUnit.SECONDS) == false) { + executor.shutdownNow(); + } + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java index 906bbfd072da8..7f4a8f6cdef02 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java +++
b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java @@ -44,6 +44,7 @@ import java.util.List; import java.util.Optional; +import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; import io.netty.channel.ChannelOption; import io.netty.channel.socket.nio.NioChannelOption; @@ -390,7 +391,9 @@ protected Publisher<Void> incomingRequest(HttpServerRequest request, HttpServerR response.chunkedTransfer(false); response.compression(true); r.headers().forEach(h -> response.addHeader(h.getKey(), h.getValue())); - return Mono.from(response.sendObject(r.content())); + + final ByteBuf content = r.content().copy(); + return Mono.from(response.sendObject(content)); }); } } diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java index 7df0b3c0c35fe..3dae2d57cf6a6 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java @@ -55,9 +55,14 @@ public void addCloseListener(ActionListener<Void> listener) { @Override public void sendResponse(HttpResponse response, ActionListener<Void> listener) { - emitter.next(createResponse(response)); - listener.onResponse(null); - emitter.complete(); + try { + emitter.next(createResponse(response)); + listener.onResponse(null); + emitter.complete(); + } catch (final Exception ex) { + emitter.error(ex); + listener.onFailure(ex); + } } @Override diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java index 56dadea0477c5..1aa03aa9967e2 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java @@ -101,6 +101,8 @@ public void receiveChunk(HttpChunk message) { lastChunkReceived = true; producer.complete(); } + } catch (final Exception ex) { + producer.error(ex); } finally { message.close(); } diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java index f34f54e561021..8ed6710c8a1e3 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java @@ -44,7 +44,7 @@ public void subscribe(Subscriber<? super HttpChunk> s) { } HttpChunk createChunk(HttpContent chunk, boolean last) { - return new ReactorNetty4HttpChunk(chunk.content().retain(), last); + return new ReactorNetty4HttpChunk(chunk.copy().content(), last); } StreamingHttpChannel httpChannel() { diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java
index 616edccdfc396..6aaccc500072b 100644
--- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java
@@ -21,7 +21,11 @@ class ReactorNetty4StreamingResponseProducer implements StreamingHttpContentSend
     private volatile FluxSink<HttpContent> emitter;
 
     ReactorNetty4StreamingResponseProducer() {
-        this.sender = Flux.create(emitter -> this.emitter = emitter);
+        this.sender = Flux.create(emitter -> register(emitter));
+    }
+
+    private void register(FluxSink<HttpContent> emitter) {
+        this.emitter = emitter;
     }
 
     @Override
diff --git a/plugins/workload-management/build.gradle b/plugins/workload-management/build.gradle
new file mode 100644
index 0000000000000..cb14d22ef149f
--- /dev/null
+++ b/plugins/workload-management/build.gradle
@@ -0,0 +1,21 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+apply plugin: 'opensearch.yaml-rest-test'
+apply plugin: 'opensearch.internal-cluster-test'
+
+opensearchplugin {
+  description 'OpenSearch Workload Management Plugin.'
+  classname 'org.opensearch.plugin.wlm.WorkloadManagementPlugin'
+}
+
+dependencies {
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java
new file mode 100644
index 0000000000000..64f510fa1db67
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java
@@ -0,0 +1,81 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm;
+
+import org.opensearch.action.ActionRequest;
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
+import org.opensearch.cluster.node.DiscoveryNodes;
+import org.opensearch.common.inject.Module;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.IndexScopedSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.settings.SettingsFilter;
+import org.opensearch.core.action.ActionResponse;
+import org.opensearch.plugin.wlm.action.CreateQueryGroupAction;
+import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction;
+import org.opensearch.plugin.wlm.action.GetQueryGroupAction;
+import org.opensearch.plugin.wlm.action.TransportCreateQueryGroupAction;
+import org.opensearch.plugin.wlm.action.TransportDeleteQueryGroupAction;
+import org.opensearch.plugin.wlm.action.TransportGetQueryGroupAction;
+import org.opensearch.plugin.wlm.rest.RestCreateQueryGroupAction;
+import org.opensearch.plugin.wlm.rest.RestDeleteQueryGroupAction;
+import org.opensearch.plugin.wlm.rest.RestGetQueryGroupAction;
+import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService;
+import org.opensearch.plugins.ActionPlugin;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.rest.RestController;
+import org.opensearch.rest.RestHandler;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.function.Supplier;
+
+/**
+ * Plugin class for WorkloadManagement
+ */
+public class WorkloadManagementPlugin extends Plugin implements ActionPlugin {
+
+    /**
+     * Default constructor
+     */
+    public WorkloadManagementPlugin() {}
+
+    @Override
+    public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
+        return List.of(
+            new ActionPlugin.ActionHandler<>(CreateQueryGroupAction.INSTANCE, TransportCreateQueryGroupAction.class),
+            new ActionPlugin.ActionHandler<>(GetQueryGroupAction.INSTANCE, TransportGetQueryGroupAction.class),
+            new ActionPlugin.ActionHandler<>(DeleteQueryGroupAction.INSTANCE, TransportDeleteQueryGroupAction.class)
+        );
+    }
+
+    @Override
+    public List<RestHandler> getRestHandlers(
+        Settings settings,
+        RestController restController,
+        ClusterSettings clusterSettings,
+        IndexScopedSettings indexScopedSettings,
+        SettingsFilter settingsFilter,
+        IndexNameExpressionResolver indexNameExpressionResolver,
+        Supplier<DiscoveryNodes> nodesInCluster
+    ) {
+        return List.of(new RestCreateQueryGroupAction(), new RestGetQueryGroupAction(), new RestDeleteQueryGroupAction());
+    }
+
+    @Override
+    public List<Setting<?>> getSettings() {
+        return List.of(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT);
+    }
+
+    @Override
+    public Collection<Module> createGuiceModules() {
+        return List.of(new WorkloadManagementPluginModule());
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java
new file mode 100644
index 0000000000000..b7c7805639eb2
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java
@@ -0,0 +1,31 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm;
+
+import org.opensearch.common.inject.AbstractModule;
+import org.opensearch.common.inject.Singleton;
+import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService;
+
+/**
+ * Guice Module to manage WorkloadManagement related objects
+ */
+public class WorkloadManagementPluginModule extends AbstractModule {
+
+    /**
+     * Constructor for WorkloadManagementPluginModule
+     */
+    public WorkloadManagementPluginModule() {}
+
+    @Override
+    protected void configure() {
+        // Bind QueryGroupPersistenceService as a singleton to ensure a single instance is used,
+        // preventing multiple throttling key registrations in the constructor.
+        bind(QueryGroupPersistenceService.class).in(Singleton.class);
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupAction.java
new file mode 100644
index 0000000000000..14cb8cfcd125a
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupAction.java
@@ -0,0 +1,36 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.ActionType;
+
+/**
+ * Transport action to create QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class CreateQueryGroupAction extends ActionType<CreateQueryGroupResponse> {
+
+    /**
+     * An instance of CreateQueryGroupAction
+     */
+    public static final CreateQueryGroupAction INSTANCE = new CreateQueryGroupAction();
+
+    /**
+     * Name for CreateQueryGroupAction
+     */
+    public static final String NAME = "cluster:admin/opensearch/wlm/query_group/_create";
+
+    /**
+     * Default constructor
+     */
+    private CreateQueryGroupAction() {
+        super(NAME, CreateQueryGroupResponse::new);
+    }
+}
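The action type above is wired to its transport handler in WorkloadManagementPlugin.getActions(), so the create flow can also be driven programmatically. A minimal sketch, assuming a NodeClient and the request body documented on CreateQueryGroupRequest just below (logger and listener bodies are placeholders):

    // Hypothetical caller: parse the documented JSON body and execute the action.
    String body = "{ \"name\": \"analytics\", \"resiliency_mode\": \"enforced\","
        + " \"resource_limits\": { \"cpu\": 0.4, \"memory\": 0.2 } }";
    XContentParser parser = JsonXContent.jsonXContent.createParser(
        NamedXContentRegistry.EMPTY,
        DeprecationHandler.THROW_UNSUPPORTED_OPERATION_EXCEPTION,
        body
    );
    CreateQueryGroupRequest request = CreateQueryGroupRequest.fromXContent(parser);
    client.execute(CreateQueryGroupAction.INSTANCE, request, ActionListener.wrap(
        response -> logger.info("created query group [{}]", response.getQueryGroup().getName()),
        e -> logger.error("query group creation failed", e)
    ));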
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java
new file mode 100644
index 0000000000000..ff6422be36885
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java
@@ -0,0 +1,82 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.ActionRequest;
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.cluster.metadata.QueryGroup;
+import org.opensearch.common.UUIDs;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.xcontent.XContentParser;
+import org.joda.time.Instant;
+
+import java.io.IOException;
+
+/**
+ * A request to create a QueryGroup
+ * User input schema:
+ *  {
+ *    "name": "analytics",
+ *    "resiliency_mode": "enforced",
+ *    "resource_limits": {
+ *      "cpu" : 0.4,
+ *      "memory" : 0.2
+ *    }
+ *  }
+ *
+ * @opensearch.experimental
+ */
+public class CreateQueryGroupRequest extends ActionRequest {
+    private final QueryGroup queryGroup;
+
+    /**
+     * Constructor for CreateQueryGroupRequest
+     * @param queryGroup - A {@link QueryGroup} object
+     */
+    public CreateQueryGroupRequest(QueryGroup queryGroup) {
+        this.queryGroup = queryGroup;
+    }
+
+    /**
+     * Constructor for CreateQueryGroupRequest
+     * @param in - A {@link StreamInput} object
+     */
+    public CreateQueryGroupRequest(StreamInput in) throws IOException {
+        super(in);
+        queryGroup = new QueryGroup(in);
+    }
+
+    /**
+     * Generate a CreateQueryGroupRequest from XContent
+     * @param parser - A {@link XContentParser} object
+     */
+    public static CreateQueryGroupRequest fromXContent(XContentParser parser) throws IOException {
+        QueryGroup.Builder builder = QueryGroup.Builder.fromXContent(parser);
+        return new CreateQueryGroupRequest(builder._id(UUIDs.randomBase64UUID()).updatedAt(Instant.now().getMillis()).build());
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        return null;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        queryGroup.writeTo(out);
+    }
+
+    /**
+     * QueryGroup getter
+     */
+    public QueryGroup getQueryGroup() {
+        return queryGroup;
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponse.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponse.java
new file mode 100644
index 0000000000000..9a2a8178c0a29
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponse.java
@@ -0,0 +1,74 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.cluster.metadata.QueryGroup;
+import org.opensearch.core.action.ActionResponse;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.ToXContentObject;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Response for the create API for QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class CreateQueryGroupResponse extends ActionResponse implements ToXContent, ToXContentObject {
+    private final QueryGroup queryGroup;
+    private final RestStatus restStatus;
+
+    /**
+     * Constructor for CreateQueryGroupResponse
+     * @param queryGroup - The QueryGroup to be included in the response
+     * @param restStatus - The restStatus for the response
+     */
+    public CreateQueryGroupResponse(final QueryGroup queryGroup, RestStatus restStatus) {
+        this.queryGroup = queryGroup;
+        this.restStatus = restStatus;
+    }
+
+    /**
+     * Constructor for CreateQueryGroupResponse
+     * @param in - A {@link StreamInput} object
+     */
+    public CreateQueryGroupResponse(StreamInput in) throws IOException {
+        queryGroup = new QueryGroup(in);
+        restStatus = RestStatus.readFrom(in);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        queryGroup.writeTo(out);
+        RestStatus.writeTo(out, restStatus);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        return queryGroup.toXContent(builder, params);
+    }
+
+    /**
+     * queryGroup getter
+     */
+    public QueryGroup getQueryGroup() {
+        return queryGroup;
+    }
+
+    /**
+     * restStatus getter
+     */
+    public RestStatus getRestStatus() {
+        return restStatus;
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java
new file mode 100644
index 0000000000000..c78952a2f89ad
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java
@@ -0,0 +1,38 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.ActionType;
+import org.opensearch.action.support.master.AcknowledgedResponse;
+
+/**
+ * Transport action to delete a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class DeleteQueryGroupAction extends ActionType<AcknowledgedResponse> {
+
+    /**
+     * An instance of DeleteQueryGroupAction
+     */
+    public static final DeleteQueryGroupAction INSTANCE = new DeleteQueryGroupAction();
+
+    /**
+     * Name for DeleteQueryGroupAction
+     */
+    public static final String NAME = "cluster:admin/opensearch/wlm/query_group/_delete";
+
+    /**
+     * Default constructor
+     */
+    private DeleteQueryGroupAction() {
+        super(NAME, AcknowledgedResponse::new);
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java
new file mode 100644
index 0000000000000..e514943c2c7e9
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java
@@ -0,0 +1,65 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Request to delete a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class DeleteQueryGroupRequest extends AcknowledgedRequest<DeleteQueryGroupRequest> {
+    private final String name;
+
+    /**
+     * Default constructor for DeleteQueryGroupRequest
+     * @param name - name of the QueryGroup to delete
+     */
+    public DeleteQueryGroupRequest(String name) {
+        this.name = name;
+    }
+
+    /**
+     * Constructor for DeleteQueryGroupRequest
+     * @param in - A {@link StreamInput} object
+     */
+    public DeleteQueryGroupRequest(StreamInput in) throws IOException {
+        super(in);
+        name = in.readOptionalString();
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        if (name == null) {
+            ActionRequestValidationException actionRequestValidationException = new ActionRequestValidationException();
+            actionRequestValidationException.addValidationError("QueryGroup name is missing");
+            return actionRequestValidationException;
+        }
+        return null;
+    }
+
+    /**
+     * Name getter
+     */
+    public String getName() {
+        return name;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeOptionalString(name);
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupAction.java
new file mode 100644
index 0000000000000..0200185580f7d
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupAction.java
@@ -0,0 +1,36 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.ActionType;
+
+/**
+ * Transport action to get a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class GetQueryGroupAction extends ActionType<GetQueryGroupResponse> {
+
+    /**
+     * An instance of GetQueryGroupAction
+     */
+    public static final GetQueryGroupAction INSTANCE = new GetQueryGroupAction();
+
+    /**
+     * Name for GetQueryGroupAction
+     */
+    public static final String NAME = "cluster:admin/opensearch/wlm/query_group/_get";
+
+    /**
+     * Default constructor
+     */
+    private GetQueryGroupAction() {
+        super(NAME, GetQueryGroupResponse::new);
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequest.java
new file mode 100644
index 0000000000000..0524c615a84e7
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequest.java
@@ -0,0 +1,64 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.cluster.metadata.QueryGroup;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Request to get a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class GetQueryGroupRequest extends ClusterManagerNodeReadRequest<GetQueryGroupRequest> {
+    final String name;
+
+    /**
+     * Default constructor for GetQueryGroupRequest
+     * @param name - name for the QueryGroup to get
+     */
+    public GetQueryGroupRequest(String name) {
+        this.name = name;
+    }
+
+    /**
+     * Constructor for GetQueryGroupRequest
+     * @param in - A {@link StreamInput} object
+     */
+    public GetQueryGroupRequest(StreamInput in) throws IOException {
+        super(in);
+        name = in.readOptionalString();
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        if (name != null) {
+            QueryGroup.validateName(name);
+        }
+        return null;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeOptionalString(name);
+    }
+
+    /**
+     * Name getter
+     */
+    public String getName() {
+        return name;
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponse.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponse.java
new file mode 100644
index 0000000000000..547c501e6a28e
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponse.java
@@ -0,0 +1,82 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.cluster.metadata.QueryGroup;
+import org.opensearch.core.action.ActionResponse;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.ToXContentObject;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Collection;
+
+/**
+ * Response for the get API for QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class GetQueryGroupResponse extends ActionResponse implements ToXContent, ToXContentObject {
+    private final Collection<QueryGroup> queryGroups;
+    private final RestStatus restStatus;
+
+    /**
+     * Constructor for GetQueryGroupResponse
+     * @param queryGroups - The QueryGroup list to be fetched
+     * @param restStatus - The rest status of the request
+     */
+    public GetQueryGroupResponse(final Collection<QueryGroup> queryGroups, RestStatus restStatus) {
+        this.queryGroups = queryGroups;
+        this.restStatus = restStatus;
+    }
+
+    /**
+     * Constructor for GetQueryGroupResponse
+     * @param in - A {@link StreamInput} object
+     */
+    public GetQueryGroupResponse(StreamInput in) throws IOException {
+        this.queryGroups = in.readList(QueryGroup::new);
+        restStatus = RestStatus.readFrom(in);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeCollection(queryGroups);
+        RestStatus.writeTo(out, restStatus);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.startArray("query_groups");
+        for (QueryGroup group : queryGroups) {
+            group.toXContent(builder, params);
+        }
+        builder.endArray();
+        builder.endObject();
+        return builder;
+    }
+
+    /**
+     * queryGroups getter
+     */
+    public Collection<QueryGroup> getQueryGroups() {
+        return queryGroups;
+    }
+
+    /**
+     * restStatus getter
+     */
+    public RestStatus getRestStatus() {
+        return restStatus;
+    }
+}
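For reference, GetQueryGroupResponse wraps the matched groups in a query_groups array. A sketch of rendering it, reusing the builder plumbing from the tests later in this change (the commented output mirrors the per-group JSON asserted there):

    XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint();
    response.toXContent(builder, ToXContent.EMPTY_PARAMS);
    // {
    //   "query_groups" : [
    //     { "_id" : "...", "name" : "analytics", "resiliency_mode" : "monitor",
    //       "updated_at" : 4513232413, "resource_limits" : { "memory" : 0.3 } }
    //   ]
    // }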
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java
new file mode 100644
index 0000000000000..190ff17261bb4
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java
@@ -0,0 +1,51 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.HandledTransportAction;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService;
+import org.opensearch.tasks.Task;
+import org.opensearch.transport.TransportService;
+
+/**
+ * Transport action to create QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class TransportCreateQueryGroupAction extends HandledTransportAction<CreateQueryGroupRequest, CreateQueryGroupResponse> {
+
+    private final QueryGroupPersistenceService queryGroupPersistenceService;
+
+    /**
+     * Constructor for TransportCreateQueryGroupAction
+     *
+     * @param actionName - action name
+     * @param transportService - a {@link TransportService} object
+     * @param actionFilters - a {@link ActionFilters} object
+     * @param queryGroupPersistenceService - a {@link QueryGroupPersistenceService} object
+     */
+    @Inject
+    public TransportCreateQueryGroupAction(
+        String actionName,
+        TransportService transportService,
+        ActionFilters actionFilters,
+        QueryGroupPersistenceService queryGroupPersistenceService
+    ) {
+        super(CreateQueryGroupAction.NAME, transportService, actionFilters, CreateQueryGroupRequest::new);
+        this.queryGroupPersistenceService = queryGroupPersistenceService;
+    }
+
+    @Override
+    protected void doExecute(Task task, CreateQueryGroupRequest request, ActionListener<CreateQueryGroupResponse> listener) {
+        queryGroupPersistenceService.persistInClusterStateMetadata(request.getQueryGroup(), listener);
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java
new file mode 100644
index 0000000000000..e4d3908d4a208
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java
@@ -0,0 +1,91 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
+import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.block.ClusterBlockException;
+import org.opensearch.cluster.block.ClusterBlockLevel;
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportService;
+
+import java.io.IOException;
+
+/**
+ * Transport action to delete a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class TransportDeleteQueryGroupAction extends TransportClusterManagerNodeAction<DeleteQueryGroupRequest, AcknowledgedResponse> {
+
+    private final QueryGroupPersistenceService queryGroupPersistenceService;
+
+    /**
+     * Constructor for TransportDeleteQueryGroupAction
+     *
+     * @param clusterService - a {@link ClusterService} object
+     * @param transportService - a {@link TransportService} object
+     * @param actionFilters - a {@link ActionFilters} object
+     * @param threadPool - a {@link ThreadPool} object
+     * @param indexNameExpressionResolver - a {@link IndexNameExpressionResolver} object
+     * @param queryGroupPersistenceService - a {@link QueryGroupPersistenceService} object
+     */
+    @Inject
+    public TransportDeleteQueryGroupAction(
+        ClusterService clusterService,
+        TransportService transportService,
+        ActionFilters actionFilters,
+        ThreadPool threadPool,
+        IndexNameExpressionResolver indexNameExpressionResolver,
+        QueryGroupPersistenceService queryGroupPersistenceService
+    ) {
+        super(
+            DeleteQueryGroupAction.NAME,
+            transportService,
+            clusterService,
+            threadPool,
+            actionFilters,
+            DeleteQueryGroupRequest::new,
+            indexNameExpressionResolver
+        );
+        this.queryGroupPersistenceService = queryGroupPersistenceService;
+    }
+
+    @Override
+    protected void clusterManagerOperation(
+        DeleteQueryGroupRequest request,
+        ClusterState state,
+        ActionListener<AcknowledgedResponse> listener
+    ) throws Exception {
+        queryGroupPersistenceService.deleteInClusterStateMetadata(request, listener);
+    }
+
+    @Override
+    protected String executor() {
+        return ThreadPool.Names.SAME;
+    }
+
+    @Override
+    protected AcknowledgedResponse read(StreamInput in) throws IOException {
+        return new AcknowledgedResponse(in);
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(DeleteQueryGroupRequest request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupAction.java
new file mode 100644
index 0000000000000..51bb21b255511
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupAction.java
@@ -0,0 +1,98 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.ResourceNotFoundException;
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.block.ClusterBlockException;
+import org.opensearch.cluster.block.ClusterBlockLevel;
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
+import org.opensearch.cluster.metadata.QueryGroup;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.Collection;
+
+/**
+ * Transport action to get a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class TransportGetQueryGroupAction extends TransportClusterManagerNodeReadAction<GetQueryGroupRequest, GetQueryGroupResponse> {
+    private static final Logger logger = LogManager.getLogger(TransportGetQueryGroupAction.class);
+
+    /**
+     * Constructor for TransportGetQueryGroupAction
+     *
+     * @param clusterService - a {@link ClusterService} object
+     * @param transportService - a {@link TransportService} object
+     * @param actionFilters - a {@link ActionFilters} object
+     * @param threadPool - a {@link ThreadPool} object
+     * @param indexNameExpressionResolver - a {@link IndexNameExpressionResolver} object
+     */
+    @Inject
+    public TransportGetQueryGroupAction(
+        ClusterService clusterService,
+        TransportService transportService,
+        ActionFilters actionFilters,
+        ThreadPool threadPool,
+        IndexNameExpressionResolver indexNameExpressionResolver
+    ) {
+        super(
+            GetQueryGroupAction.NAME,
+            transportService,
+            clusterService,
+            threadPool,
+            actionFilters,
+            GetQueryGroupRequest::new,
+            indexNameExpressionResolver,
+            true
+        );
+    }
+
+    @Override
+    protected String executor() {
+        return ThreadPool.Names.SAME;
+    }
+
+    @Override
+    protected GetQueryGroupResponse read(StreamInput in) throws IOException {
+        return new GetQueryGroupResponse(in);
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(GetQueryGroupRequest request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
+    }
+
+    @Override
+    protected void clusterManagerOperation(GetQueryGroupRequest request, ClusterState state, ActionListener<GetQueryGroupResponse> listener)
+        throws Exception {
+        final String name = request.getName();
+        final Collection<QueryGroup> resultGroups = QueryGroupPersistenceService.getFromClusterStateMetadata(name, state);
+
+        if (resultGroups.isEmpty() && name != null && !name.isEmpty()) {
+            logger.warn("No QueryGroup exists with the provided name: {}", name);
+            throw new ResourceNotFoundException("No QueryGroup exists with the provided name: " + name);
+        }
+        listener.onResponse(new GetQueryGroupResponse(resultGroups, RestStatus.OK));
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/package-info.java
new file mode 100644
index 0000000000000..9921500df8a81
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Package for the action classes of WorkloadManagementPlugin
+ */
+package org.opensearch.plugin.wlm.action;
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/package-info.java
new file mode 100644
index 0000000000000..84c99967b226b
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Base package for WorkloadManagementPlugin
+ */
+package org.opensearch.plugin.wlm;
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateQueryGroupAction.java
new file mode 100644
index 0000000000000..b0e0af4f9d17f
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateQueryGroupAction.java
@@ -0,0 +1,72 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.rest;
+
+import org.opensearch.client.node.NodeClient;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentParser;
+import org.opensearch.plugin.wlm.action.CreateQueryGroupAction;
+import org.opensearch.plugin.wlm.action.CreateQueryGroupRequest;
+import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse;
+import org.opensearch.rest.BaseRestHandler;
+import org.opensearch.rest.BytesRestResponse;
+import org.opensearch.rest.RestChannel;
+import org.opensearch.rest.RestRequest;
+import org.opensearch.rest.RestResponse;
+import org.opensearch.rest.action.RestResponseListener;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.opensearch.rest.RestRequest.Method.POST;
+import static org.opensearch.rest.RestRequest.Method.PUT;
+
+/**
+ * Rest action to create a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class RestCreateQueryGroupAction extends BaseRestHandler {
+
+    /**
+     * Constructor for RestCreateQueryGroupAction
+     */
+    public RestCreateQueryGroupAction() {}
+
+    @Override
+    public String getName() {
+        return "create_query_group";
+    }
+
+    /**
+     * The list of {@link Route}s that this RestHandler is responsible for handling.
+     */
+    @Override
+    public List<Route> routes() {
+        return List.of(new Route(POST, "_wlm/query_group/"), new Route(PUT, "_wlm/query_group/"));
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+        try (XContentParser parser = request.contentParser()) {
+            CreateQueryGroupRequest createQueryGroupRequest = CreateQueryGroupRequest.fromXContent(parser);
+            return channel -> client.execute(CreateQueryGroupAction.INSTANCE, createQueryGroupRequest, createQueryGroupResponse(channel));
+        }
+    }
+
+    private RestResponseListener<CreateQueryGroupResponse> createQueryGroupResponse(final RestChannel channel) {
+        return new RestResponseListener<>(channel) {
+            @Override
+            public RestResponse buildResponse(final CreateQueryGroupResponse response) throws Exception {
+                return new BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS));
+            }
+        };
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java
new file mode 100644
index 0000000000000..8ad621cf8a1e4
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java
@@ -0,0 +1,57 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.rest;
+
+import org.opensearch.client.node.NodeClient;
+import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction;
+import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest;
+import org.opensearch.rest.BaseRestHandler;
+import org.opensearch.rest.RestRequest;
+import org.opensearch.rest.action.RestToXContentListener;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.opensearch.rest.RestRequest.Method.DELETE;
+
+/**
+ * Rest action to delete a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class RestDeleteQueryGroupAction extends BaseRestHandler {
+
+    /**
+     * Constructor for RestDeleteQueryGroupAction
+     */
+    public RestDeleteQueryGroupAction() {}
+
+    @Override
+    public String getName() {
+        return "delete_query_group";
+    }
+
+    /**
+     * The list of {@link Route}s that this RestHandler is responsible for handling.
+     */
+    @Override
+    public List<Route> routes() {
+        return List.of(new Route(DELETE, "_wlm/query_group/{name}"));
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+        DeleteQueryGroupRequest deleteQueryGroupRequest = new DeleteQueryGroupRequest(request.param("name"));
+        deleteQueryGroupRequest.clusterManagerNodeTimeout(
+            request.paramAsTime("cluster_manager_timeout", deleteQueryGroupRequest.clusterManagerNodeTimeout())
+        );
+        deleteQueryGroupRequest.timeout(request.paramAsTime("timeout", deleteQueryGroupRequest.timeout()));
+        return channel -> client.execute(DeleteQueryGroupAction.INSTANCE, deleteQueryGroupRequest, new RestToXContentListener<>(channel));
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestGetQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestGetQueryGroupAction.java
new file mode 100644
index 0000000000000..c250bd2979e98
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestGetQueryGroupAction.java
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.rest;
+
+import org.opensearch.client.node.NodeClient;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.plugin.wlm.action.GetQueryGroupAction;
+import org.opensearch.plugin.wlm.action.GetQueryGroupRequest;
+import org.opensearch.plugin.wlm.action.GetQueryGroupResponse;
+import org.opensearch.rest.BaseRestHandler;
+import org.opensearch.rest.BytesRestResponse;
+import org.opensearch.rest.RestChannel;
+import org.opensearch.rest.RestRequest;
+import org.opensearch.rest.RestResponse;
+import org.opensearch.rest.action.RestResponseListener;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.opensearch.rest.RestRequest.Method.GET;
+
+/**
+ * Rest action to get a QueryGroup
+ *
+ * @opensearch.experimental
+ */
+public class RestGetQueryGroupAction extends BaseRestHandler {
+
+    /**
+     * Constructor for RestGetQueryGroupAction
+     */
+    public RestGetQueryGroupAction() {}
+
+    @Override
+    public String getName() {
+        return "get_query_group";
+    }
+
+    /**
+     * The list of {@link Route}s that this RestHandler is responsible for handling.
+     */
+    @Override
+    public List<Route> routes() {
+        return List.of(new Route(GET, "_wlm/query_group/{name}"), new Route(GET, "_wlm/query_group/"));
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+        final GetQueryGroupRequest getQueryGroupRequest = new GetQueryGroupRequest(request.param("name"));
+        return channel -> client.execute(GetQueryGroupAction.INSTANCE, getQueryGroupRequest, getQueryGroupResponse(channel));
+    }
+
+    private RestResponseListener<GetQueryGroupResponse> getQueryGroupResponse(final RestChannel channel) {
+        return new RestResponseListener<>(channel) {
+            @Override
+            public RestResponse buildResponse(final GetQueryGroupResponse response) throws Exception {
+                return new BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS));
+            }
+        };
+    }
+}
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/package-info.java
new file mode 100644
index 0000000000000..7d7cb9028fdb8
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Package for the rest classes of WorkloadManagementPlugin
+ */
+package org.opensearch.plugin.wlm.rest;
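With the three handlers and their routes in place, the endpoints can be exercised with the same low-level REST client the streaming test at the top of this change uses. A sketch, with illustrative names and limits:

    Request create = new Request("PUT", "_wlm/query_group/");
    create.setJsonEntity("{ \"name\": \"analytics\", \"resiliency_mode\": \"monitor\", \"resource_limits\": { \"memory\": 0.3 } }");
    Response created = client().performRequest(create);

    // Fetch one group by name, or every group by omitting the name.
    Response fetched = client().performRequest(new Request("GET", "_wlm/query_group/analytics"));

    Response deleted = client().performRequest(new Request("DELETE", "_wlm/query_group/analytics"));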
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java
new file mode 100644
index 0000000000000..ba5161a2c855e
--- /dev/null
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java
@@ -0,0 +1,273 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.service;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.ResourceNotFoundException;
+import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.cluster.AckedClusterStateUpdateTask;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.ClusterStateUpdateTask;
+import org.opensearch.cluster.metadata.Metadata;
+import org.opensearch.cluster.metadata.QueryGroup;
+import org.opensearch.cluster.service.ClusterManagerTaskThrottler;
+import org.opensearch.cluster.service.ClusterManagerTaskThrottler.ThrottlingKey;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.Priority;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse;
+import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest;
+import org.opensearch.search.ResourceType;
+
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+
+/**
+ * This class defines the functions for QueryGroup persistence
+ */
+public class QueryGroupPersistenceService {
+    static final String SOURCE = "query-group-persistence-service";
+    private static final String CREATE_QUERY_GROUP_THROTTLING_KEY = "create-query-group";
+    private static final String DELETE_QUERY_GROUP_THROTTLING_KEY = "delete-query-group";
+    private static final Logger logger = LogManager.getLogger(QueryGroupPersistenceService.class);
+    /**
+     * max QueryGroup count setting name
+     */
+    public static final String QUERY_GROUP_COUNT_SETTING_NAME = "node.query_group.max_count";
+    /**
+     * default max queryGroup count on any node at any given point in time
+     */
+    private static final int DEFAULT_MAX_QUERY_GROUP_COUNT_VALUE = 100;
+    /**
+     * min queryGroup count on any node at any given point in time
+     */
+    private static final int MIN_QUERY_GROUP_COUNT_VALUE = 1;
+    /**
+     * max QueryGroup count setting
+     */
+    public static final Setting<Integer> MAX_QUERY_GROUP_COUNT = Setting.intSetting(
+        QUERY_GROUP_COUNT_SETTING_NAME,
+        DEFAULT_MAX_QUERY_GROUP_COUNT_VALUE,
+        0,
+        QueryGroupPersistenceService::validateMaxQueryGroupCount,
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+    private final ClusterService clusterService;
+    private volatile int maxQueryGroupCount;
+    final ThrottlingKey createQueryGroupThrottlingKey;
+    final ThrottlingKey deleteQueryGroupThrottlingKey;
+
+    /**
+     * Constructor for QueryGroupPersistenceService
+     *
+     * @param clusterService {@link ClusterService} - The cluster service to be used by QueryGroupPersistenceService
+     * @param settings {@link Settings} - The settings to be used by QueryGroupPersistenceService
+     * @param clusterSettings {@link ClusterSettings} - The cluster settings to be used by QueryGroupPersistenceService
+     */
+    @Inject
+    public QueryGroupPersistenceService(
+        final ClusterService clusterService,
+        final Settings settings,
+        final ClusterSettings clusterSettings
+    ) {
+        this.clusterService = clusterService;
+        this.createQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(CREATE_QUERY_GROUP_THROTTLING_KEY, true);
+        this.deleteQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(DELETE_QUERY_GROUP_THROTTLING_KEY, true);
+        setMaxQueryGroupCount(MAX_QUERY_GROUP_COUNT.get(settings));
+        clusterSettings.addSettingsUpdateConsumer(MAX_QUERY_GROUP_COUNT, this::setMaxQueryGroupCount);
+    }
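Because MAX_QUERY_GROUP_COUNT is declared with the Dynamic property and the constructor registers an update consumer for it, the cap should be adjustable at runtime rather than only at node startup. Assuming it is surfaced through the standard cluster settings API, an update would look like (value illustrative):

    Request update = new Request("PUT", "_cluster/settings");
    update.setJsonEntity("{ \"persistent\": { \"node.query_group.max_count\": 50 } }");
    client().performRequest(update);

Values outside [1, 100] are rejected by validateMaxQueryGroupCount below.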
clusterService.registerClusterManagerTask(DELETE_QUERY_GROUP_THROTTLING_KEY, true); + setMaxQueryGroupCount(MAX_QUERY_GROUP_COUNT.get(settings)); + clusterSettings.addSettingsUpdateConsumer(MAX_QUERY_GROUP_COUNT, this::setMaxQueryGroupCount); + } + + /** + * Set maxQueryGroupCount to be newMaxQueryGroupCount + * @param newMaxQueryGroupCount - the max number of QueryGroup allowed + */ + public void setMaxQueryGroupCount(int newMaxQueryGroupCount) { + validateMaxQueryGroupCount(newMaxQueryGroupCount); + this.maxQueryGroupCount = newMaxQueryGroupCount; + } + + /** + * Validator for maxQueryGroupCount + * @param maxQueryGroupCount - the maxQueryGroupCount number to be verified + */ + private static void validateMaxQueryGroupCount(int maxQueryGroupCount) { + if (maxQueryGroupCount > DEFAULT_MAX_QUERY_GROUP_COUNT_VALUE || maxQueryGroupCount < MIN_QUERY_GROUP_COUNT_VALUE) { + throw new IllegalArgumentException(QUERY_GROUP_COUNT_SETTING_NAME + " should be in range [1-100]."); + } + } + + /** + * Update cluster state to include the new QueryGroup + * @param queryGroup {@link QueryGroup} - the QueryGroup we're currently creating + * @param listener - ActionListener for CreateQueryGroupResponse + */ + public void persistInClusterStateMetadata(QueryGroup queryGroup, ActionListener listener) { + clusterService.submitStateUpdateTask(SOURCE, new ClusterStateUpdateTask(Priority.NORMAL) { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + return saveQueryGroupInClusterState(queryGroup, currentState); + } + + @Override + public ThrottlingKey getClusterManagerThrottlingKey() { + return createQueryGroupThrottlingKey; + } + + @Override + public void onFailure(String source, Exception e) { + logger.warn("failed to save QueryGroup object due to error: {}, for source: {}.", e.getMessage(), source); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + CreateQueryGroupResponse response = new CreateQueryGroupResponse(queryGroup, RestStatus.OK); + listener.onResponse(response); + } + }); + } + + /** + * This method will be executed before we submit the new cluster state + * @param queryGroup - the QueryGroup we're currently creating + * @param currentClusterState - the cluster state before the update + */ + ClusterState saveQueryGroupInClusterState(final QueryGroup queryGroup, final ClusterState currentClusterState) { + final Map existingQueryGroups = currentClusterState.metadata().queryGroups(); + String groupName = queryGroup.getName(); + + // check if maxQueryGroupCount will breach + if (existingQueryGroups.size() == maxQueryGroupCount) { + logger.warn("{} value exceeded its assigned limit of {}.", QUERY_GROUP_COUNT_SETTING_NAME, maxQueryGroupCount); + throw new IllegalStateException("Can't create more than " + maxQueryGroupCount + " QueryGroups in the system."); + } + + // check for duplicate name + Optional findExistingGroup = existingQueryGroups.values() + .stream() + .filter(group -> group.getName().equals(groupName)) + .findFirst(); + if (findExistingGroup.isPresent()) { + logger.warn("QueryGroup with name {} already exists. Not creating a new one.", groupName); + throw new IllegalArgumentException("QueryGroup with name " + groupName + " already exists. 
Not creating a new one."); + } + + // check if there's any resource allocation that exceed limit of 1.0 + Map totalUsageMap = calculateTotalUsage(existingQueryGroups, queryGroup); + for (ResourceType resourceType : queryGroup.getResourceLimits().keySet()) { + if (totalUsageMap.get(resourceType) > 1) { + logger.warn("Total resource allocation for {} will go above the max limit of 1.0.", resourceType.getName()); + throw new IllegalArgumentException( + "Total resource allocation for " + resourceType.getName() + " will go above the max limit of 1.0." + ); + } + } + + return ClusterState.builder(currentClusterState) + .metadata(Metadata.builder(currentClusterState.metadata()).put(queryGroup).build()) + .build(); + } + + /** + * This method calculates the existing total usage of the all the resource limits + * @param existingQueryGroups - existing QueryGroups in the system + * @param queryGroup - the QueryGroup we're creating or updating + */ + private Map calculateTotalUsage(Map existingQueryGroups, QueryGroup queryGroup) { + final Map map = new EnumMap<>(ResourceType.class); + map.putAll(queryGroup.getResourceLimits()); + for (QueryGroup currGroup : existingQueryGroups.values()) { + if (!currGroup.getName().equals(queryGroup.getName())) { + for (ResourceType resourceType : queryGroup.getResourceLimits().keySet()) { + map.compute(resourceType, (k, v) -> v + currGroup.getResourceLimits().get(resourceType)); + } + } + } + return map; + } + + /** + * Get the QueryGroups with the specified name from cluster state + * @param name - the QueryGroup name we are getting + * @param currentState - current cluster state + */ + public static Collection getFromClusterStateMetadata(String name, ClusterState currentState) { + final Map currentGroups = currentState.getMetadata().queryGroups(); + if (name == null || name.isEmpty()) { + return currentGroups.values(); + } + return currentGroups.values() + .stream() + .filter(group -> group.getName().equals(name)) + .findAny() + .stream() + .collect(Collectors.toList()); + } + + /** + * Modify cluster state to delete the QueryGroup + * @param deleteQueryGroupRequest - request to delete a QueryGroup + * @param listener - ActionListener for AcknowledgedResponse + */ + public void deleteInClusterStateMetadata( + DeleteQueryGroupRequest deleteQueryGroupRequest, + ActionListener listener + ) { + clusterService.submitStateUpdateTask(SOURCE, new AckedClusterStateUpdateTask<>(deleteQueryGroupRequest, listener) { + @Override + public ClusterState execute(ClusterState currentState) { + return deleteQueryGroupInClusterState(deleteQueryGroupRequest.getName(), currentState); + } + + @Override + public ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey() { + return deleteQueryGroupThrottlingKey; + } + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + }); + } + + /** + * Modify cluster state to delete the QueryGroup, and return the new cluster state + * @param name - the name for QueryGroup to be deleted + * @param currentClusterState - current cluster state + */ + ClusterState deleteQueryGroupInClusterState(final String name, final ClusterState currentClusterState) { + final Metadata metadata = currentClusterState.metadata(); + final QueryGroup queryGroupToRemove = metadata.queryGroups() + .values() + .stream() + .filter(queryGroup -> queryGroup.getName().equals(name)) + .findAny() + .orElseThrow(() -> new ResourceNotFoundException("No QueryGroup exists with the provided 
name: " + name)); + + return ClusterState.builder(currentClusterState).metadata(Metadata.builder(metadata).remove(queryGroupToRemove).build()).build(); + } + + /** + * maxQueryGroupCount getter + */ + public int getMaxQueryGroupCount() { + return maxQueryGroupCount; + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/package-info.java new file mode 100644 index 0000000000000..5848e9c936623 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Package for the service classes of WorkloadManagementPlugin + */ +package org.opensearch.plugin.wlm.service; diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/QueryGroupTestUtils.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/QueryGroupTestUtils.java new file mode 100644 index 0000000000000..5ba1ad5334712 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/QueryGroupTestUtils.java @@ -0,0 +1,144 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm; + +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.service.ClusterApplierService; +import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.opensearch.cluster.metadata.QueryGroup.builder; +import static org.opensearch.search.ResourceType.fromName; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; + +public class QueryGroupTestUtils { + public static final String NAME_ONE = "query_group_one"; + public static final String NAME_TWO = "query_group_two"; + public static final String _ID_ONE = "AgfUO5Ja9yfsYlONlYi3TQ=="; + public static final String _ID_TWO = "G5iIqHy4g7eK1qIAAAAIH53=1"; + public static final String NAME_NONE_EXISTED = "query_group_none_existed"; + public static final String MEMORY_STRING = "memory"; + public static final String MONITOR_STRING = "monitor"; + public static final long TIMESTAMP_ONE = 4513232413L; + public static final long TIMESTAMP_TWO = 4513232415L; + public static final QueryGroup queryGroupOne = builder().name(NAME_ONE) + ._id(_ID_ONE) + .mode(MONITOR_STRING) + 
.resourceLimits(Map.of(fromName(MEMORY_STRING), 0.3)) + .updatedAt(TIMESTAMP_ONE) + .build(); + + public static final QueryGroup queryGroupTwo = builder().name(NAME_TWO) + ._id(_ID_TWO) + .mode(MONITOR_STRING) + .resourceLimits(Map.of(fromName(MEMORY_STRING), 0.6)) + .updatedAt(TIMESTAMP_TWO) + .build(); + + public static List queryGroupList() { + List list = new ArrayList<>(); + list.add(queryGroupOne); + list.add(queryGroupTwo); + return list; + } + + public static ClusterState clusterState() { + final Metadata metadata = Metadata.builder().queryGroups(Map.of(_ID_ONE, queryGroupOne, _ID_TWO, queryGroupTwo)).build(); + return ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + } + + public static Set> clusterSettingsSet() { + Set> set = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + set.add(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT); + assertFalse(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT)); + return set; + } + + public static Settings settings() { + return Settings.builder().build(); + } + + public static ClusterSettings clusterSettings() { + return new ClusterSettings(settings(), clusterSettingsSet()); + } + + public static QueryGroupPersistenceService queryGroupPersistenceService() { + ClusterApplierService clusterApplierService = new ClusterApplierService( + "name", + settings(), + clusterSettings(), + mock(ThreadPool.class) + ); + clusterApplierService.setInitialState(clusterState()); + ClusterService clusterService = new ClusterService( + settings(), + clusterSettings(), + mock(ClusterManagerService.class), + clusterApplierService + ); + return new QueryGroupPersistenceService(clusterService, settings(), clusterSettings()); + } + + public static Tuple preparePersistenceServiceSetup(Map queryGroups) { + Metadata metadata = Metadata.builder().queryGroups(queryGroups).build(); + Settings settings = Settings.builder().build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); + ClusterApplierService clusterApplierService = new ClusterApplierService( + "name", + settings(), + clusterSettings(), + mock(ThreadPool.class) + ); + clusterApplierService.setInitialState(clusterState); + ClusterService clusterService = new ClusterService( + settings(), + clusterSettings(), + mock(ClusterManagerService.class), + clusterApplierService + ); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + settings, + clusterSettings + ); + return new Tuple(queryGroupPersistenceService, clusterState); + } + + public static void assertEqualQueryGroups(Collection collectionOne, Collection collectionTwo) { + assertEquals(collectionOne.size(), collectionTwo.size()); + List listOne = new ArrayList<>(collectionOne); + List listTwo = new ArrayList<>(collectionTwo); + listOne.sort(Comparator.comparing(QueryGroup::getName)); + listTwo.sort(Comparator.comparing(QueryGroup::getName)); + for (int i = 0; i < listOne.size(); i++) { + assertTrue(listOne.get(i).equals(listTwo.get(i))); + } + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequestTests.java new file mode 100644 index 0000000000000..b0fa96a46df80 --- /dev/null +++ 
b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequestTests.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.assertEqualQueryGroups; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupOne; + +public class CreateQueryGroupRequestTests extends OpenSearchTestCase { + + /** + * Test case to verify the serialization and deserialization of CreateQueryGroupRequest. + */ + public void testSerialization() throws IOException { + CreateQueryGroupRequest request = new CreateQueryGroupRequest(queryGroupOne); + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + CreateQueryGroupRequest otherRequest = new CreateQueryGroupRequest(streamInput); + List list1 = new ArrayList<>(); + List list2 = new ArrayList<>(); + list1.add(queryGroupOne); + list2.add(otherRequest.getQueryGroup()); + assertEqualQueryGroups(list1, list2); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java new file mode 100644 index 0000000000000..ecb9a6b2dc0d2 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.mockito.Mockito.mock; + +public class CreateQueryGroupResponseTests extends OpenSearchTestCase { + + /** + * Test case to verify serialization and deserialization of CreateQueryGroupResponse. 
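+     * The response is written to a BytesStreamOutput and re-read from the resulting
+     * StreamInput; the deserialized copy must preserve the RestStatus and all
+     * QueryGroup fields.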
+     */
+    public void testSerialization() throws IOException {
+        CreateQueryGroupResponse response = new CreateQueryGroupResponse(QueryGroupTestUtils.queryGroupOne, RestStatus.OK);
+        BytesStreamOutput out = new BytesStreamOutput();
+        response.writeTo(out);
+        StreamInput streamInput = out.bytes().streamInput();
+        CreateQueryGroupResponse otherResponse = new CreateQueryGroupResponse(streamInput);
+        assertEquals(response.getRestStatus(), otherResponse.getRestStatus());
+        QueryGroup responseGroup = response.getQueryGroup();
+        QueryGroup otherResponseGroup = otherResponse.getQueryGroup();
+        List<QueryGroup> listOne = new ArrayList<>();
+        List<QueryGroup> listTwo = new ArrayList<>();
+        listOne.add(responseGroup);
+        listTwo.add(otherResponseGroup);
+        QueryGroupTestUtils.assertEqualQueryGroups(listOne, listTwo);
+    }
+
+    /**
+     * Test case to validate the toXContent method of CreateQueryGroupResponse.
+     */
+    public void testToXContentCreateQueryGroup() throws IOException {
+        XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint();
+        CreateQueryGroupResponse response = new CreateQueryGroupResponse(QueryGroupTestUtils.queryGroupOne, RestStatus.OK);
+        String actual = response.toXContent(builder, mock(ToXContent.Params.class)).toString();
+        String expected = "{\n"
+            + "  \"_id\" : \"AgfUO5Ja9yfsYlONlYi3TQ==\",\n"
+            + "  \"name\" : \"query_group_one\",\n"
+            + "  \"resiliency_mode\" : \"monitor\",\n"
+            + "  \"updated_at\" : 4513232413,\n"
+            + "  \"resource_limits\" : {\n"
+            + "    \"memory\" : 0.3\n"
+            + "  }\n"
+            + "}";
+        assertEquals(expected, actual);
+    }
+}
diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java
new file mode 100644
index 0000000000000..bc2e4f0faca4c
--- /dev/null
+++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java
@@ -0,0 +1,42 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.plugin.wlm.QueryGroupTestUtils;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.IOException;
+
+public class DeleteQueryGroupRequestTests extends OpenSearchTestCase {
+
+    /**
+     * Test case to verify the serialization and deserialization of DeleteQueryGroupRequest.
+     */
+    public void testSerialization() throws IOException {
+        DeleteQueryGroupRequest request = new DeleteQueryGroupRequest(QueryGroupTestUtils.NAME_ONE);
+        assertEquals(QueryGroupTestUtils.NAME_ONE, request.getName());
+        BytesStreamOutput out = new BytesStreamOutput();
+        request.writeTo(out);
+        StreamInput streamInput = out.bytes().streamInput();
+        DeleteQueryGroupRequest otherRequest = new DeleteQueryGroupRequest(streamInput);
+        assertEquals(request.getName(), otherRequest.getName());
+    }
+
+    /**
+     * Test case to verify that validation fails for a DeleteQueryGroupRequest built with a null name.
+     */
+    public void testSerializationWithNull() throws IOException {
+        DeleteQueryGroupRequest request = new DeleteQueryGroupRequest((String) null);
+        ActionRequestValidationException actionRequestValidationException = request.validate();
+        assertFalse(actionRequestValidationException.getMessage().isEmpty());
+    }
+}
diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequestTests.java
new file mode 100644
index 0000000000000..32b5f7ec9e2c3
--- /dev/null
+++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupRequestTests.java
@@ -0,0 +1,53 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.wlm.action;
+
+import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.plugin.wlm.QueryGroupTestUtils;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.IOException;
+
+public class GetQueryGroupRequestTests extends OpenSearchTestCase {
+
+    /**
+     * Test case to verify the serialization and deserialization of GetQueryGroupRequest.
+     */
+    public void testSerialization() throws IOException {
+        GetQueryGroupRequest request = new GetQueryGroupRequest(QueryGroupTestUtils.NAME_ONE);
+        assertEquals(QueryGroupTestUtils.NAME_ONE, request.getName());
+        BytesStreamOutput out = new BytesStreamOutput();
+        request.writeTo(out);
+        StreamInput streamInput = out.bytes().streamInput();
+        GetQueryGroupRequest otherRequest = new GetQueryGroupRequest(streamInput);
+        assertEquals(request.getName(), otherRequest.getName());
+    }
+
+    /**
+     * Test case to verify the serialization and deserialization of GetQueryGroupRequest when name is null.
+     */
+    public void testSerializationWithNull() throws IOException {
+        GetQueryGroupRequest request = new GetQueryGroupRequest((String) null);
+        assertNull(request.getName());
+        BytesStreamOutput out = new BytesStreamOutput();
+        request.writeTo(out);
+        StreamInput streamInput = out.bytes().streamInput();
+        GetQueryGroupRequest otherRequest = new GetQueryGroupRequest(streamInput);
+        assertEquals(request.getName(), otherRequest.getName());
+    }
+
+    /**
+     * Test case to verify the validation function of GetQueryGroupRequest.
+     */
+    public void testValidation() {
+        GetQueryGroupRequest request = new GetQueryGroupRequest("a".repeat(51));
+        assertThrows(IllegalArgumentException.class, request::validate);
+    }
+}
diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponseTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponseTests.java
new file mode 100644
index 0000000000000..774f4b2d8db52
--- /dev/null
+++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetQueryGroupResponseTests.java
@@ -0,0 +1,151 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.mockito.Mockito.mock; + +public class GetQueryGroupResponseTests extends OpenSearchTestCase { + + /** + * Test case to verify the serialization and deserialization of GetQueryGroupResponse. + */ + public void testSerializationSingleQueryGroup() throws IOException { + List list = new ArrayList<>(); + list.add(QueryGroupTestUtils.queryGroupOne); + GetQueryGroupResponse response = new GetQueryGroupResponse(list, RestStatus.OK); + assertEquals(response.getQueryGroups(), list); + + BytesStreamOutput out = new BytesStreamOutput(); + response.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + + GetQueryGroupResponse otherResponse = new GetQueryGroupResponse(streamInput); + assertEquals(response.getRestStatus(), otherResponse.getRestStatus()); + QueryGroupTestUtils.assertEqualQueryGroups(response.getQueryGroups(), otherResponse.getQueryGroups()); + } + + /** + * Test case to verify the serialization and deserialization of GetQueryGroupResponse when the result contains multiple QueryGroups. + */ + public void testSerializationMultipleQueryGroup() throws IOException { + GetQueryGroupResponse response = new GetQueryGroupResponse(QueryGroupTestUtils.queryGroupList(), RestStatus.OK); + assertEquals(response.getQueryGroups(), QueryGroupTestUtils.queryGroupList()); + + BytesStreamOutput out = new BytesStreamOutput(); + response.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + + GetQueryGroupResponse otherResponse = new GetQueryGroupResponse(streamInput); + assertEquals(response.getRestStatus(), otherResponse.getRestStatus()); + assertEquals(2, otherResponse.getQueryGroups().size()); + QueryGroupTestUtils.assertEqualQueryGroups(response.getQueryGroups(), otherResponse.getQueryGroups()); + } + + /** + * Test case to verify the serialization and deserialization of GetQueryGroupResponse when the result is empty. + */ + public void testSerializationNull() throws IOException { + List list = new ArrayList<>(); + GetQueryGroupResponse response = new GetQueryGroupResponse(list, RestStatus.OK); + assertEquals(response.getQueryGroups(), list); + + BytesStreamOutput out = new BytesStreamOutput(); + response.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + + GetQueryGroupResponse otherResponse = new GetQueryGroupResponse(streamInput); + assertEquals(response.getRestStatus(), otherResponse.getRestStatus()); + assertEquals(0, otherResponse.getQueryGroups().size()); + } + + /** + * Test case to verify the toXContent of GetQueryGroupResponse. 
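+     * A single QueryGroup should be rendered as a one-element "query_groups" array
+     * in the pretty-printed JSON emitted by toXContent.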
+ */ + public void testToXContentGetSingleQueryGroup() throws IOException { + List queryGroupList = new ArrayList<>(); + queryGroupList.add(QueryGroupTestUtils.queryGroupOne); + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + GetQueryGroupResponse response = new GetQueryGroupResponse(queryGroupList, RestStatus.OK); + String actual = response.toXContent(builder, mock(ToXContent.Params.class)).toString(); + String expected = "{\n" + + " \"query_groups\" : [\n" + + " {\n" + + " \"_id\" : \"AgfUO5Ja9yfsYlONlYi3TQ==\",\n" + + " \"name\" : \"query_group_one\",\n" + + " \"resiliency_mode\" : \"monitor\",\n" + + " \"updated_at\" : 4513232413,\n" + + " \"resource_limits\" : {\n" + + " \"memory\" : 0.3\n" + + " }\n" + + " }\n" + + " ]\n" + + "}"; + assertEquals(expected, actual); + } + + /** + * Test case to verify the toXContent of GetQueryGroupResponse when the result contains multiple QueryGroups. + */ + public void testToXContentGetMultipleQueryGroup() throws IOException { + List queryGroupList = new ArrayList<>(); + queryGroupList.add(QueryGroupTestUtils.queryGroupOne); + queryGroupList.add(QueryGroupTestUtils.queryGroupTwo); + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + GetQueryGroupResponse response = new GetQueryGroupResponse(queryGroupList, RestStatus.OK); + String actual = response.toXContent(builder, mock(ToXContent.Params.class)).toString(); + String expected = "{\n" + + " \"query_groups\" : [\n" + + " {\n" + + " \"_id\" : \"AgfUO5Ja9yfsYlONlYi3TQ==\",\n" + + " \"name\" : \"query_group_one\",\n" + + " \"resiliency_mode\" : \"monitor\",\n" + + " \"updated_at\" : 4513232413,\n" + + " \"resource_limits\" : {\n" + + " \"memory\" : 0.3\n" + + " }\n" + + " },\n" + + " {\n" + + " \"_id\" : \"G5iIqHy4g7eK1qIAAAAIH53=1\",\n" + + " \"name\" : \"query_group_two\",\n" + + " \"resiliency_mode\" : \"monitor\",\n" + + " \"updated_at\" : 4513232415,\n" + + " \"resource_limits\" : {\n" + + " \"memory\" : 0.6\n" + + " }\n" + + " }\n" + + " ]\n" + + "}"; + assertEquals(expected, actual); + } + + /** + * Test case to verify toXContent of GetQueryGroupResponse when the result contains zero QueryGroup. + */ + public void testToXContentGetZeroQueryGroup() throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + GetQueryGroupResponse otherResponse = new GetQueryGroupResponse(new ArrayList<>(), RestStatus.OK); + String actual = otherResponse.toXContent(builder, mock(ToXContent.Params.class)).toString(); + String expected = "{\n" + " \"query_groups\" : [ ]\n" + "}"; + assertEquals(expected, actual); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java new file mode 100644 index 0000000000000..253d65f8da80f --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class TransportDeleteQueryGroupActionTests extends OpenSearchTestCase { + + ClusterService clusterService = mock(ClusterService.class); + TransportService transportService = mock(TransportService.class); + ActionFilters actionFilters = mock(ActionFilters.class); + ThreadPool threadPool = mock(ThreadPool.class); + IndexNameExpressionResolver indexNameExpressionResolver = mock(IndexNameExpressionResolver.class); + QueryGroupPersistenceService queryGroupPersistenceService = mock(QueryGroupPersistenceService.class); + + TransportDeleteQueryGroupAction action = new TransportDeleteQueryGroupAction( + clusterService, + transportService, + actionFilters, + threadPool, + indexNameExpressionResolver, + queryGroupPersistenceService + ); + + /** + * Test case to validate the construction for TransportDeleteQueryGroupAction + */ + public void testConstruction() { + assertNotNull(action); + assertEquals(ThreadPool.Names.SAME, action.executor()); + } + + /** + * Test case to validate the clusterManagerOperation function in TransportDeleteQueryGroupAction + */ + public void testClusterManagerOperation() throws Exception { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest("testGroup"); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + ClusterState clusterState = mock(ClusterState.class); + action.clusterManagerOperation(request, clusterState, listener); + verify(queryGroupPersistenceService).deleteInClusterStateMetadata(eq(request), eq(listener)); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupActionTests.java new file mode 100644 index 0000000000000..755b11a5f4b89 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetQueryGroupActionTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.ResourceNotFoundException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_NONE_EXISTED; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.clusterState; +import static org.mockito.Mockito.mock; + +public class TransportGetQueryGroupActionTests extends OpenSearchTestCase { + + /** + * Test case for ClusterManagerOperation function + */ + @SuppressWarnings("unchecked") + public void testClusterManagerOperation() throws Exception { + GetQueryGroupRequest getQueryGroupRequest1 = new GetQueryGroupRequest(NAME_NONE_EXISTED); + GetQueryGroupRequest getQueryGroupRequest2 = new GetQueryGroupRequest(NAME_ONE); + TransportGetQueryGroupAction transportGetQueryGroupAction = new TransportGetQueryGroupAction( + mock(ClusterService.class), + mock(TransportService.class), + mock(ActionFilters.class), + mock(ThreadPool.class), + mock(IndexNameExpressionResolver.class) + ); + assertThrows( + ResourceNotFoundException.class, + () -> transportGetQueryGroupAction.clusterManagerOperation(getQueryGroupRequest1, clusterState(), mock(ActionListener.class)) + ); + transportGetQueryGroupAction.clusterManagerOperation(getQueryGroupRequest2, clusterState(), mock(ActionListener.class)); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java new file mode 100644 index 0000000000000..72191e076bb87 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.plugin.wlm.rest;
+
+import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.client.node.NodeClient;
+import org.opensearch.common.CheckedConsumer;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction;
+import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest;
+import org.opensearch.rest.RestChannel;
+import org.opensearch.rest.RestHandler;
+import org.opensearch.rest.RestRequest;
+import org.opensearch.rest.action.RestToXContentListener;
+import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.test.rest.FakeRestRequest;
+
+import java.util.List;
+
+import org.mockito.ArgumentCaptor;
+
+import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE;
+import static org.opensearch.rest.RestRequest.Method.DELETE;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+
+public class RestDeleteQueryGroupActionTests extends OpenSearchTestCase {
+    /**
+     * Test case to validate the construction for RestDeleteQueryGroupAction
+     */
+    public void testConstruction() {
+        RestDeleteQueryGroupAction action = new RestDeleteQueryGroupAction();
+        assertNotNull(action);
+        assertEquals("delete_query_group", action.getName());
+        List<RestHandler.Route> routes = action.routes();
+        assertEquals(1, routes.size());
+        RestHandler.Route route = routes.get(0);
+        assertEquals(DELETE, route.getMethod());
+        assertEquals("_wlm/query_group/{name}", route.getPath());
+    }
+
+    /**
+     * Test case to validate the prepareRequest logic for RestDeleteQueryGroupAction
+     */
+    @SuppressWarnings("unchecked")
+    public void testPrepareRequest() throws Exception {
+        RestDeleteQueryGroupAction restDeleteQueryGroupAction = new RestDeleteQueryGroupAction();
+        NodeClient nodeClient = mock(NodeClient.class);
+        RestRequest realRequest = new FakeRestRequest();
+        realRequest.params().put("name", NAME_ONE);
+        RestRequest spyRequest = spy(realRequest);
+
+        doReturn(TimeValue.timeValueSeconds(30)).when(spyRequest).paramAsTime(eq("cluster_manager_timeout"), any(TimeValue.class));
+        doReturn(TimeValue.timeValueSeconds(60)).when(spyRequest).paramAsTime(eq("timeout"), any(TimeValue.class));
+
+        CheckedConsumer<RestChannel, Exception> consumer = restDeleteQueryGroupAction.prepareRequest(spyRequest, nodeClient);
+        assertNotNull(consumer);
+        ArgumentCaptor<DeleteQueryGroupRequest> requestCaptor = ArgumentCaptor.forClass(DeleteQueryGroupRequest.class);
+        ArgumentCaptor<RestToXContentListener<AcknowledgedResponse>> listenerCaptor = ArgumentCaptor.forClass(RestToXContentListener.class);
+        doNothing().when(nodeClient).execute(eq(DeleteQueryGroupAction.INSTANCE), requestCaptor.capture(), listenerCaptor.capture());
+
+        consumer.accept(mock(RestChannel.class));
+        DeleteQueryGroupRequest capturedRequest = requestCaptor.getValue();
+        assertEquals(NAME_ONE, capturedRequest.getName());
+        assertEquals(TimeValue.timeValueSeconds(30), capturedRequest.clusterManagerNodeTimeout());
+        assertEquals(TimeValue.timeValueSeconds(60), capturedRequest.timeout());
+        verify(nodeClient).execute(
+            eq(DeleteQueryGroupAction.INSTANCE),
+            any(DeleteQueryGroupRequest.class),
+            any(RestToXContentListener.class)
+        );
+    }
+}
diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java
b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java new file mode 100644 index 0000000000000..a516ffdde839e --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java @@ -0,0 +1,359 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.service; + +import org.opensearch.ResourceNotFoundException; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.AckedClusterStateUpdateTask; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; +import org.opensearch.search.ResourceType; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import org.mockito.ArgumentCaptor; + +import static org.opensearch.cluster.metadata.QueryGroup.builder; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.MEMORY_STRING; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.MONITOR_STRING; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_NONE_EXISTED; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_TWO; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_ONE; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_TWO; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.assertEqualQueryGroups; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.clusterSettings; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.clusterSettingsSet; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.clusterState; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.preparePersistenceServiceSetup; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupList; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupOne; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupPersistenceService; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupTwo; +import static org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.QUERY_GROUP_COUNT_SETTING_NAME; +import static org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.SOURCE; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.argThat; +import static 
org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +public class QueryGroupPersistenceServiceTests extends OpenSearchTestCase { + + /** + * Test case to validate the creation logic of a QueryGroup + */ + public void testCreateQueryGroup() { + Tuple setup = preparePersistenceServiceSetup(new HashMap<>()); + QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + ClusterState newClusterState = queryGroupPersistenceService1.saveQueryGroupInClusterState(queryGroupOne, clusterState); + Map updatedGroupsMap = newClusterState.getMetadata().queryGroups(); + assertEquals(1, updatedGroupsMap.size()); + assertTrue(updatedGroupsMap.containsKey(_ID_ONE)); + List listOne = new ArrayList<>(); + List listTwo = new ArrayList<>(); + listOne.add(queryGroupOne); + listTwo.add(updatedGroupsMap.get(_ID_ONE)); + assertEqualQueryGroups(listOne, listTwo); + } + + /** + * Test case to validate the logic for adding a new QueryGroup to a cluster state that already contains + * an existing QueryGroup + */ + public void testCreateAnotherQueryGroup() { + Tuple setup = preparePersistenceServiceSetup(Map.of(_ID_ONE, queryGroupOne)); + QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + ClusterState newClusterState = queryGroupPersistenceService1.saveQueryGroupInClusterState(queryGroupTwo, clusterState); + Map updatedGroups = newClusterState.getMetadata().queryGroups(); + assertEquals(2, updatedGroups.size()); + assertTrue(updatedGroups.containsKey(_ID_TWO)); + Collection values = updatedGroups.values(); + assertEqualQueryGroups(queryGroupList(), new ArrayList<>(values)); + } + + /** + * Test case to ensure the error is thrown when we try to create another QueryGroup with duplicate name + */ + public void testCreateQueryGroupDuplicateName() { + Tuple setup = preparePersistenceServiceSetup(Map.of(_ID_ONE, queryGroupOne)); + QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + QueryGroup toCreate = builder().name(NAME_ONE) + ._id("W5iIqHyhgi4K1qIAAAAIHw==") + .mode(MONITOR_STRING) + .resourceLimits(Map.of(ResourceType.fromName(MEMORY_STRING), 0.3)) + .updatedAt(1690934400000L) + .build(); + assertThrows(RuntimeException.class, () -> queryGroupPersistenceService1.saveQueryGroupInClusterState(toCreate, clusterState)); + } + + /** + * Test case to ensure the error is thrown when we try to create another QueryGroup that will make + * the total resource limits go above 1 + */ + public void testCreateQueryGroupOverflowAllocation() { + Tuple setup = preparePersistenceServiceSetup(Map.of(_ID_TWO, queryGroupTwo)); + QueryGroup toCreate = builder().name(NAME_ONE) + ._id("W5iIqHyhgi4K1qIAAAAIHw==") + .mode(MONITOR_STRING) + .resourceLimits(Map.of(ResourceType.fromName(MEMORY_STRING), 0.41)) + .updatedAt(1690934400000L) + .build(); + + QueryGroupPersistenceService queryGroupPersistenceService1 = setup.v1(); + ClusterState clusterState = setup.v2(); + assertThrows(RuntimeException.class, () -> queryGroupPersistenceService1.saveQueryGroupInClusterState(toCreate, clusterState)); + } + + /** + * Test case to ensure the error is thrown when we already have the max allowed number of QueryGroups, but + * we want to create another one + */ + public void testCreateQueryGroupOverflowCount() { + QueryGroup toCreate = builder().name(NAME_NONE_EXISTED) + 
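// a third group; the metadata below already holds two groups and the max-count
+            // setting is pinned to 2, so persisting this one must throw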
._id("W5iIqHyhgi4K1qIAAAAIHw==") + .mode(MONITOR_STRING) + .resourceLimits(Map.of(ResourceType.fromName(MEMORY_STRING), 0.5)) + .updatedAt(1690934400000L) + .build(); + Metadata metadata = Metadata.builder().queryGroups(Map.of(_ID_ONE, queryGroupOne, _ID_TWO, queryGroupTwo)).build(); + Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 2).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); + ClusterService clusterService = new ClusterService(settings, clusterSettings, mock(ThreadPool.class)); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + QueryGroupPersistenceService queryGroupPersistenceService1 = new QueryGroupPersistenceService( + clusterService, + settings, + clusterSettings + ); + assertThrows(RuntimeException.class, () -> queryGroupPersistenceService1.saveQueryGroupInClusterState(toCreate, clusterState)); + } + + /** + * Tests the invalid value of {@code node.query_group.max_count} + */ + public void testInvalidMaxQueryGroupCount() { + Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 2).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); + ClusterService clusterService = new ClusterService(settings, clusterSettings, mock(ThreadPool.class)); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + settings, + clusterSettings + ); + assertThrows(IllegalArgumentException.class, () -> queryGroupPersistenceService.setMaxQueryGroupCount(-1)); + } + + /** + * Tests the valid value of {@code node.query_group.max_count} + */ + public void testValidMaxSandboxCountSetting() { + Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 100).build(); + ClusterService clusterService = new ClusterService(settings, clusterSettings(), mock(ThreadPool.class)); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + settings, + clusterSettings() + ); + queryGroupPersistenceService.setMaxQueryGroupCount(50); + assertEquals(50, queryGroupPersistenceService.getMaxQueryGroupCount()); + } + + /** + * Tests PersistInClusterStateMetadata function + */ + public void testPersistInClusterStateMetadata() { + ClusterService clusterService = mock(ClusterService.class); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + QueryGroupTestUtils.settings(), + clusterSettings() + ); + queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); + verify(clusterService).submitStateUpdateTask(eq(SOURCE), any()); + } + + /** + * Tests PersistInClusterStateMetadata function with inner functions + */ + public void testPersistInClusterStateMetadataInner() { + ClusterService clusterService = mock(ClusterService.class); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + QueryGroupTestUtils.settings(), + clusterSettings() + ); + ArgumentCaptor captor = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); + queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); + verify(clusterService, times(1)).submitStateUpdateTask(eq(SOURCE), 
captor.capture()); + ClusterStateUpdateTask capturedTask = captor.getValue(); + assertEquals(queryGroupPersistenceService.createQueryGroupThrottlingKey, capturedTask.getClusterManagerThrottlingKey()); + + doAnswer(invocation -> { + ClusterStateUpdateTask task = invocation.getArgument(1); + task.clusterStateProcessed(SOURCE, mock(ClusterState.class), mock(ClusterState.class)); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); + verify(listener).onResponse(any(CreateQueryGroupResponse.class)); + } + + /** + * Tests PersistInClusterStateMetadata function with failure + */ + public void testPersistInClusterStateMetadataFailure() { + ClusterService clusterService = mock(ClusterService.class); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + QueryGroupTestUtils.settings(), + clusterSettings() + ); + doAnswer(invocation -> { + ClusterStateUpdateTask task = invocation.getArgument(1); + Exception exception = new RuntimeException("Test Exception"); + task.onFailure(SOURCE, exception); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + queryGroupPersistenceService.persistInClusterStateMetadata(queryGroupOne, listener); + verify(listener).onFailure(any(RuntimeException.class)); + } + + /** + * Tests getting a single QueryGroup + */ + public void testGetSingleQueryGroup() { + Collection groupsCollections = QueryGroupPersistenceService.getFromClusterStateMetadata(NAME_ONE, clusterState()); + List groups = new ArrayList<>(groupsCollections); + assertEquals(1, groups.size()); + QueryGroup queryGroup = groups.get(0); + List listOne = new ArrayList<>(); + List listTwo = new ArrayList<>(); + listOne.add(QueryGroupTestUtils.queryGroupOne); + listTwo.add(queryGroup); + QueryGroupTestUtils.assertEqualQueryGroups(listOne, listTwo); + } + + /** + * Tests getting all QueryGroups + */ + public void testGetAllQueryGroups() { + assertEquals(2, QueryGroupTestUtils.clusterState().metadata().queryGroups().size()); + Collection groupsCollections = QueryGroupPersistenceService.getFromClusterStateMetadata(null, clusterState()); + List res = new ArrayList<>(groupsCollections); + assertEquals(2, res.size()); + Set currentNAME = res.stream().map(QueryGroup::getName).collect(Collectors.toSet()); + assertTrue(currentNAME.contains(QueryGroupTestUtils.NAME_ONE)); + assertTrue(currentNAME.contains(QueryGroupTestUtils.NAME_TWO)); + QueryGroupTestUtils.assertEqualQueryGroups(QueryGroupTestUtils.queryGroupList(), res); + } + + /** + * Tests getting a QueryGroup with invalid name + */ + public void testGetNonExistedQueryGroups() { + Collection groupsCollections = QueryGroupPersistenceService.getFromClusterStateMetadata( + NAME_NONE_EXISTED, + clusterState() + ); + List groups = new ArrayList<>(groupsCollections); + assertEquals(0, groups.size()); + } + + /** + * Tests setting maxQueryGroupCount + */ + public void testMaxQueryGroupCount() { + assertThrows(IllegalArgumentException.class, () -> QueryGroupTestUtils.queryGroupPersistenceService().setMaxQueryGroupCount(-1)); + QueryGroupPersistenceService queryGroupPersistenceService = QueryGroupTestUtils.queryGroupPersistenceService(); + queryGroupPersistenceService.setMaxQueryGroupCount(50); + assertEquals(50, queryGroupPersistenceService.getMaxQueryGroupCount()); + } + + /** + * 
Tests deleting a single QueryGroup
+     */
+    public void testDeleteSingleQueryGroup() {
+        ClusterState newClusterState = queryGroupPersistenceService().deleteQueryGroupInClusterState(NAME_TWO, clusterState());
+        Map<String, QueryGroup> afterDeletionGroups = newClusterState.getMetadata().queryGroups();
+        assertFalse(afterDeletionGroups.containsKey(_ID_TWO));
+        assertEquals(1, afterDeletionGroups.size());
+        List<QueryGroup> oldQueryGroups = new ArrayList<>();
+        oldQueryGroups.add(queryGroupOne);
+        assertEqualQueryGroups(new ArrayList<>(afterDeletionGroups.values()), oldQueryGroups);
+    }
+
+    /**
+     * Tests deleting a QueryGroup with an invalid name
+     */
+    public void testDeleteNonExistedQueryGroup() {
+        assertThrows(
+            ResourceNotFoundException.class,
+            () -> queryGroupPersistenceService().deleteQueryGroupInClusterState(NAME_NONE_EXISTED, clusterState())
+        );
+    }
+
+    /**
+     * Tests the deleteInClusterStateMetadata function
+     */
+    @SuppressWarnings("unchecked")
+    public void testDeleteInClusterStateMetadata() throws Exception {
+        DeleteQueryGroupRequest request = new DeleteQueryGroupRequest(NAME_ONE);
+        ClusterService clusterService = mock(ClusterService.class);
+
+        ActionListener<AcknowledgedResponse> listener = mock(ActionListener.class);
+        QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService(
+            clusterService,
+            QueryGroupTestUtils.settings(),
+            clusterSettings()
+        );
+        doAnswer(invocation -> {
+            AckedClusterStateUpdateTask task = invocation.getArgument(1);
+            ClusterState initialState = clusterState();
+            ClusterState newState = task.execute(initialState);
+            assertNotNull(newState);
+            assertEquals(queryGroupPersistenceService.deleteQueryGroupThrottlingKey, task.getClusterManagerThrottlingKey());
+            task.onAllNodesAcked(null);
+            verify(listener).onResponse(argThat(response -> response.isAcknowledged()));
+            return null;
+        }).when(clusterService).submitStateUpdateTask(anyString(), any());
+        queryGroupPersistenceService.deleteInClusterStateMetadata(request, listener);
+        verify(clusterService).submitStateUpdateTask(eq(SOURCE), any(AckedClusterStateUpdateTask.class));
+    }
+}
diff --git a/plugins/workload-management/src/yamlRestTest/java/org/opensearch/plugin/wlm/WorkloadManagementClientYamlTestSuiteIT.java b/plugins/workload-management/src/yamlRestTest/java/org/opensearch/plugin/wlm/WorkloadManagementClientYamlTestSuiteIT.java
new file mode 100644
index 0000000000000..9ec4a36ff6a5b
--- /dev/null
+++ b/plugins/workload-management/src/yamlRestTest/java/org/opensearch/plugin/wlm/WorkloadManagementClientYamlTestSuiteIT.java
@@ -0,0 +1,52 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.plugin.wlm; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; +import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; + +/** Runs yaml rest tests */ +public class WorkloadManagementClientYamlTestSuiteIT extends OpenSearchClientYamlSuiteTestCase { + + public WorkloadManagementClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return OpenSearchClientYamlSuiteTestCase.createParameters(); + } +} diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_query_group_context.json b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_query_group_context.json new file mode 100644 index 0000000000000..bb4620c01f2d6 --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/create_query_group_context.json @@ -0,0 +1,18 @@ +{ + "create_query_group_context": { + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_wlm/query_group", + "methods": ["PUT", "POST"], + "parts": {} + } + ] + }, + "params":{}, + "body":{ + "description":"The QueryGroup schema" + } + } +} diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json new file mode 100644 index 0000000000000..16930427fc2fe --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json @@ -0,0 +1,22 @@ +{ + "delete_query_group_context": { + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_wlm/query_group/{name}", + "methods": [ + "DELETE" + ], + "parts": { + "name": { + "type": "string", + "description": "QueryGroup name" + } + } + } + ] + }, + "params":{} + } +} diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/get_query_group_context.json b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/get_query_group_context.json new file mode 100644 index 0000000000000..e0d552be616b2 --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/get_query_group_context.json @@ -0,0 +1,25 @@ +{ + "get_query_group_context": { + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_wlm/query_group", + "methods": ["GET"], + "parts": {} + }, + { + "path": "/_wlm/query_group/{name}", + "methods": ["GET"], + "parts": { + "name": { + "type": "string", + "description": "QueryGroup name" + } + } + } + ] + }, + "params":{} + } +} diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml new file mode 100644 index 0000000000000..a00314986a5cf --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml @@ -0,0 +1,114 @@ +"test CRUD Operations for QueryGroup API ": + - skip: + version: " - 2.16.99" + reason: "QueryGroup WorkloadManagement feature was added in 2.17" + + - do: + 
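# The steps below walk the lifecycle end to end: create two groups, read them
+      # back, hit the duplicate-name, over-allocation, negative-limit and empty-name
+      # failure cases, then delete one group.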
create_query_group_context: + body: + { + "name": "analytics", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": 0.4, + "memory": 0.2 + } + } + + - match: { name: "analytics" } + - match: { resiliency_mode: "monitor" } + - match: { resource_limits.cpu: 0.4 } + - match: { resource_limits.memory: 0.2 } + + - do: + get_query_group_context: + name: "analytics" + + - match: { query_groups.0.name: "analytics" } + - match: { query_groups.0.resiliency_mode: "monitor" } + - match: { query_groups.0.resource_limits.cpu: 0.4 } + - match: { query_groups.0.resource_limits.memory: 0.2 } + + - do: + catch: /illegal_argument_exception/ + create_query_group_context: + body: + { + "name": "analytics", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": 0.4, + "memory": 0.2 + } + } + + - do: + catch: /illegal_argument_exception/ + create_query_group_context: + body: + { + "name": "analytics2", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": 0.61, + "memory": 0.2 + } + } + + - do: + catch: /illegal_argument_exception/ + create_query_group_context: + body: + { + "name": "analytics2", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": -0.1, + "memory": 0.2 + } + } + + - do: + catch: /illegal_argument_exception/ + create_query_group_context: + body: + { + "name": "", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": 0.1, + "memory": 0.2 + } + } + + - do: + create_query_group_context: + body: + { + "name": "analytics2", + "resiliency_mode": "monitor", + "resource_limits": { + "cpu": 0.35, + "memory": 0.25 + } + } + + - match: { name: "analytics2" } + - match: { resiliency_mode: "monitor" } + - match: { resource_limits.cpu: 0.35 } + - match: { resource_limits.memory: 0.25 } + + - do: + get_query_group_context: + name: "analytics2" + + - match: { query_groups.0.name: "analytics2" } + - match: { query_groups.0.resiliency_mode: "monitor" } + - match: { query_groups.0.resource_limits.cpu: 0.35 } + - match: { query_groups.0.resource_limits.memory: 0.25 } + + - do: + delete_query_group_context: + name: "analytics2" + + - match: { acknowledged: true } diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java b/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java index 5169ce18fff79..dd5248738569e 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java @@ -380,7 +380,7 @@ public static String escapePath(Path path) { } /** - * Recursively copy the the source directory to the target directory, preserving permissions. + * Recursively copy the source directory to the target directory, preserving permissions. 
 */
     public static void copyDirectory(Path source, Path target) throws IOException {
         Files.walkFileTree(source, new SimpleFileVisitor<Path>() {
diff --git a/qa/smoke-test-http/build.gradle b/qa/smoke-test-http/build.gradle
index f48ddc26d929b..496fda6bb717d 100644
--- a/qa/smoke-test-http/build.gradle
+++ b/qa/smoke-test-http/build.gradle
@@ -35,6 +35,7 @@ apply plugin: 'opensearch.test-with-dependencies'
 
 dependencies {
   testImplementation project(path: ':modules:transport-netty4') // for http
+  testImplementation project(path: ':plugins:transport-reactor-netty4') // for http
   testImplementation project(path: ':plugins:transport-nio')
   testImplementation project(path: ':plugins:identity-shiro') // for http
 }
diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java
index 42c7fd667fd8f..741660f972bfb 100644
--- a/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java
+++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java
@@ -152,8 +152,8 @@ public void testDanglingIndicesCanBeImported() throws Exception {
      * 1, then create two indices and delete them both while one node in
      * the cluster is stopped. The deletion of the second pushes the deletion
      * of the first out of the graveyard. When the stopped node is resumed,
-     * only the second index will be found into the graveyard and the the
-     * other will be considered dangling, and can therefore be listed and
+     * only the second index will be found into the graveyard and the other
+     * will be considered dangling, and can therefore be listed and
      * deleted through the API
      */
     public void testDanglingIndicesCanBeDeleted() throws Exception {
diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java
index 08974b902c418..6d8e80a0a63ea 100644
--- a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java
+++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java
@@ -38,6 +38,7 @@
 import org.opensearch.transport.Netty4ModulePlugin;
 import org.opensearch.transport.nio.MockNioTransportPlugin;
 import org.opensearch.transport.nio.NioTransportPlugin;
+import org.opensearch.transport.reactor.ReactorNetty4Plugin;
 import org.junit.BeforeClass;
 
 import java.util.Arrays;
@@ -53,7 +54,7 @@ public abstract class HttpSmokeTestCase extends OpenSearchIntegTestCase {
     @BeforeClass
     public static void setUpTransport() {
         nodeTransportTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class));
-        nodeHttpTypeKey = getHttpTypeKey(randomFrom(Netty4ModulePlugin.class, NioTransportPlugin.class));
+        nodeHttpTypeKey = getHttpTypeKey(randomFrom(Netty4ModulePlugin.class, NioTransportPlugin.class, ReactorNetty4Plugin.class));
         clientTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class));
     }
@@ -71,6 +72,8 @@ private static String getTypeKey(Class<? extends Plugin> clazz) {
     private static String getHttpTypeKey(Class<? extends Plugin> clazz) {
         if (clazz.equals(NioTransportPlugin.class)) {
             return NioTransportPlugin.NIO_HTTP_TRANSPORT_NAME;
+        } else if (clazz.equals(ReactorNetty4Plugin.class)) {
+            return ReactorNetty4Plugin.REACTOR_NETTY_HTTP_TRANSPORT_NAME;
         } else {
             assert clazz.equals(Netty4ModulePlugin.class);
             return Netty4ModulePlugin.NETTY_HTTP_TRANSPORT_NAME;
@@ -92,7 +95,7 @@ protected Settings nodeSettings(int nodeOrdinal) {
 
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class);
+        return Arrays.asList(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class, ReactorNetty4Plugin.class);
     }
 
     @Override
diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java
index 78398e10b9ce8..1a806b033eb8a 100644
--- a/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java
+++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java
@@ -26,6 +26,8 @@
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.transport.Netty4ModulePlugin;
 import org.opensearch.transport.nio.NioTransportPlugin;
+import org.opensearch.transport.reactor.ReactorNetty4Plugin;
+
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.core.StringContains.containsString;
@@ -42,7 +44,7 @@ protected Settings nodeSettings(int nodeOrdinal) {
 
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(OpenSearchTestCase.getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class, ShiroIdentityPlugin.class);
+        return Arrays.asList(OpenSearchTestCase.getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class, ReactorNetty4Plugin.class, ShiroIdentityPlugin.class);
     }
diff --git a/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml b/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml
index a08090100989a..4fabd038cf915 100644
--- a/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml
+++ b/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml
@@ -3,5 +3,8 @@
+
+
+
diff --git a/release-notes/opensearch.release-notes-1.3.19.md b/release-notes/opensearch.release-notes-1.3.19.md
new file mode 100644
index 0000000000000..fe62624fc6362
--- /dev/null
+++ b/release-notes/opensearch.release-notes-1.3.19.md
@@ -0,0 +1,5 @@
+## 2024-08-22 Version 1.3.19 Release Notes
+
+### Upgrades
+- OpenJDK Update (July 2024 Patch releases) ([#15002](https://github.com/opensearch-project/OpenSearch/pull/15002))
+- Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081))
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml
index 91e4127da9c32..e1bc86f1c9f3d 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml
@@ -88,7 +88,16 @@ setup:
             }
           ]
         }
       }
-
+  - do:
+      index:
+        index: test_partial_flat_object
+        id: 4
+        body: {
+          "issue": {
+            "number": 999,
+            "labels": null
+          }
+        }
   - do:
       indices.refresh:
         index: test_partial_flat_object
@@ -135,7 +144,7 @@ teardown:
         }
       }
 
-  - length: { hits.hits: 3 }
+  - length: { hits.hits: 4 }
 
 # Match Query with exact dot path.
- do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml deleted file mode 100644 index 9864bfbbb26e9..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml +++ /dev/null @@ -1,70 +0,0 @@ ---- -# The test setup includes: -# - Create index with constant_keyword field type -# - Check mapping -# - Index two example documents -# - Search -# - Delete Index when connection is teardown - -"Mappings and Supported queries": - - skip: - version: " - 2.99.99" - reason: "fixed in 3.0.0" - - # Create index with constant_keyword field type - - do: - indices.create: - index: test - body: - mappings: - properties: - genre: - type: "constant_keyword" - value: "1" - - # Index document - - do: - index: - index: test - id: 1 - body: { - "genre": "1" - } - - - do: - index: - index: test - id: 2 - body: { - "genre": 1 - } - - - do: - indices.refresh: - index: test - - # Check mapping - - do: - indices.get_mapping: - index: test - - is_true: test.mappings - - match: { test.mappings.properties.genre.type: constant_keyword } - - length: { test.mappings.properties.genre: 2 } - - # Verify Document Count - - do: - search: - body: { - query: { - match_all: {} - } - } - - - length: { hits.hits: 2 } - - match: { hits.hits.0._source.genre: "1" } - - match: { hits.hits.1._source.genre: 1 } - - # Delete Index when connection is teardown - - do: - indices.delete: - index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/115_constant_keyword.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/115_constant_keyword.yml new file mode 100644 index 0000000000000..e60981dbbf50c --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/115_constant_keyword.yml @@ -0,0 +1,332 @@ +# The test setup includes two parts: +# part1: test mapping and indexing +# part2: test query +--- +"Mappings and Indexing": + - skip: + version: " - 2.15.99" + reason: "fixed in 2.16.0" + + # Create indices with constant_keyword field type + - do: + indices.create: + index: test + body: + mappings: + properties: + genre: + type: "constant_keyword" + value: "1" + + # Index documents to test integer and string are both ok. + - do: + index: + index: test + id: 1 + body: { + "genre": "1" + } + + - do: + index: + index: test + id: 2 + body: { + "genre": 1 + } + + # Refresh + - do: + indices.refresh: + index: test + + # Check mapping + - do: + indices.get_mapping: + index: test + - is_true: test.mappings + - match: { test.mappings.properties.genre.type: constant_keyword } + - length: { test.mappings.properties.genre: 2 } + + # Verify Document Count + - do: + search: + index: test + body: { + query: { + match_all: {} + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.genre: "1" } + - match: { hits.hits.1._source.genre: 1 } + + # Delete Index when connection is teardown + - do: + indices.delete: + index: test + +--- +"Queries": + - skip: + version: " - 2.16.99" + reason: "rangeQuery and regexpQuery are introduced in 2.17.0" + + - do: + indices.create: + index: test1 + body: + mappings: + properties: + genre: + type: "constant_keyword" + value: "d3efault" + + # Index documents to test query. 
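+  # Every document in a constant_keyword index carries the same single value, so
+  # each range/regexp query below matches either all documents in the index or none.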
+ - do: + index: + index: test1 + id: 1 + body: { + "genre": "d3efault" + } + + # Refresh + - do: + indices.refresh: + index: test1 + + # Test rangeQuery + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + gte: "d3efault" + } + } + } + } + + - length: { hits.hits: 1 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + from: "d3efault", + "include_lower": "false" + } + } + } + } + + - length: { hits.hits: 0 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + lte: "d3efault" + } + } + } + } + + - length: { hits.hits: 1 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + to: "d3efault", + include_upper: "false" + } + } + } + } + + - length: { hits.hits: 0 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + from: "d3efault", + to: "d3efault", + include_lower: "false", + include_upper: "true" + } + } + } + } + + - length: { hits.hits: 0 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + from: "d3efault", + to: "d3efault", + include_lower: "true", + include_upper: "false" + } + } + } + } + + - length: { hits.hits: 0 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + from: null, + to: null + } + } + } + } + + - length: { hits.hits: 1 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + from: "d3efault", + to: "d3efault", + include_lower: "true", + include_upper: "true" + } + } + } + } + + - length: { hits.hits: 1 } + + - do: + search: + index: test1 + body: { + query: { + range: { + genre: { + from: "d3efaul", + to: "d3efault1", + include_lower: "true", + include_upper: "true" + } + } + } + } + + - length: { hits.hits: 1 } + + # Test regexpQuery + - do: + search: + index: test1 + body: { + query: { + regexp: { + "genre":"d.*" + } + } + } + + - length: { hits.hits: 1 } + + - do: + search: + index: test1 + body: { + query: { + regexp: { + "genre":"d\\defau[a-z]?t" + } + } + } + + - length: { hits.hits: 1 } + + - do: + search: + index: test1 + body: { + query: { + regexp: { + "genre":"d\\defa[a-z]?t" + } + } + } + + - length: { hits.hits: 0 } + + - do: + search: + index: test1 + body: { + query: { + regexp: { + "genre":"d3efa[a-z]{3,3}" + } + } + } + + - length: { hits.hits: 1 } + + - do: + search: + index: test1 + body: { + query: { + regexp: { + "genre":"d3efa[a-z]{4,4}" + } + } + } + + - length: { hits.hits: 0 } + + - do: + search: + index: test1 + body: { + query: { + match_all: {} + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.genre: "d3efault" } + + # Delete Index when connection is teardown + - do: + indices.delete: + index: test1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/120_field_name.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/120_field_name.yml new file mode 100644 index 0000000000000..040e883b4a4c2 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/120_field_name.yml @@ -0,0 +1,38 @@ +--- +"Index documents with field name containing only dot fail with an IllegalArgumentException": + - skip: + version: " - 2.16.99" + reason: "introduced in 2.17.0" + + - do: + indices.create: + index: test_1 + + - do: + catch: /field name cannot contain only the character \[.\]/ + index: + index: test_1 + id: 1 + body: { + .: bar + } + + - do: + catch: /field name cannot contain only the character \[.\]/ + index: + index: test_1 + id: 1 + body: { + ..: bar + } + + - do: + 
catch: /field name cannot contain only the character \[.\]/ + index: + index: test_1 + id: 1 + body: { + foo: { + .: bar + } + } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml new file mode 100644 index 0000000000000..6239eb7b8cd22 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml @@ -0,0 +1,58 @@ +setup: + - skip: + features: allowed_warnings + - do: + allowed_warnings: + - "index template [test_template_1] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test_template_1] will take precedence during new index creation" + indices.put_index_template: + name: test_template_1 + body: + index_patterns: test-* + template: + settings: + number_of_shards: 1 + number_of_replicas: 0 + "priority": 50 + + - do: + allowed_warnings: + - "index template [test_template_2] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test_template_2] will take precedence during new index creation" + indices.put_index_template: + name: test_template_2 + body: + index_patterns: test-* + data_stream: {} + template: + settings: + number_of_shards: 1 + number_of_replicas: 0 + "priority": 51 + +--- +teardown: + - do: + indices.delete_data_stream: + name: test-1 + ignore: 404 + - do: + indices.delete_index_template: + name: test_template_1 + ignore: 404 + - do: + indices.delete_index_template: + name: test_template_2 + ignore: 404 + +--- +"Delete index template which is not used by data stream but index pattern matches": + - skip: + version: " - 2.16.99" + reason: "fixed in 2.17.0" + + - do: + indices.create_data_stream: + name: test-1 + + - do: + indices.delete_index_template: + name: test_template_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/360_date_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/360_date_histogram.yml index 0ea9d3de00926..8c8a98b2db22c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/360_date_histogram.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/360_date_histogram.yml @@ -61,3 +61,94 @@ setup: - match: { aggregations.histo.buckets.8.doc_count: 1 } - match: { aggregations.histo.buckets.12.key_as_string: "2016-06-01T00:00:00.000Z" } - match: { aggregations.histo.buckets.12.doc_count: 1 } + +--- +"Date histogram aggregation w/ filter query test": + - skip: + version: " - 2.99.99" + reason: Backport fix to 2.16 + + - do: + bulk: + refresh: true + index: dhisto-agg-w-query + body: + - '{"index": {}}' + - '{"routing": "route1", "date": "2024-08-12", "dow": "monday"}' + - '{"index": {}}' + - '{"routing": "route1", "date": "2024-08-14", "dow": "wednesday"}' + - '{"index": {}}' + - '{"routing": "route1", "date": "2024-08-19", "dow": "monday"}' + - '{"index": {}}' + - '{"routing": "route2", "date": "2024-08-13", "dow": "tuesday"}' + - '{"index": {}}' + - '{"routing": "route2", "date": "2024-08-15", "dow": "thursday"}' + + - do: + search: + index: dhisto-agg-w-query + body: + query: + bool: + must: + match_all: {} + filter: + - terms: + routing: + - "route1" + aggregations: + weekHisto: + date_histogram: + field: date + calendar_interval: week + _source: false + + - match: { 
hits.total.value: 3 } + - match: { aggregations.weekHisto.buckets.0.doc_count: 2 } + - match: { aggregations.weekHisto.buckets.1.doc_count: 1 } + +--- +"Date histogram aggregation w/ shared field range test": + - do: + bulk: + refresh: true + index: dhisto-agg-w-query + body: + - '{"index": {}}' + - '{"date": "2024-10-31"}' + - '{"index": {}}' + - '{"date": "2024-11-11"}' + - '{"index": {}}' + - '{"date": "2024-11-28"}' + - '{"index": {}}' + - '{"date": "2024-12-25"}' + - '{"index": {}}' + - '{"date": "2025-01-01"}' + - '{"index": {}}' + - '{"date": "2025-02-14"}' + + - do: + search: + index: dhisto-agg-w-query + body: + profile: true + query: + range: + date: + gte: "2024-01-01" + lt: "2025-01-01" + aggregations: + monthHisto: + date_histogram: + field: date + calendar_interval: month + _source: false + + - match: { hits.total.value: 4 } + - match: { aggregations.monthHisto.buckets.0.doc_count: 1 } + - match: { aggregations.monthHisto.buckets.1.doc_count: 2 } + - match: { aggregations.monthHisto.buckets.2.doc_count: 1 } + - match: { profile.shards.0.aggregations.0.debug.optimized_segments: 1 } + - match: { profile.shards.0.aggregations.0.debug.unoptimized_segments: 0 } + - match: { profile.shards.0.aggregations.0.debug.leaf_visited: 0 } + - match: { profile.shards.0.aggregations.0.debug.inner_visited: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml index 80aad96ce1f6b..1e1d2b0706d6b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml @@ -673,3 +673,82 @@ setup: - match: { aggregations.my_range.buckets.3.from: 1.5 } - is_false: aggregations.my_range.buckets.3.to - match: { aggregations.my_range.buckets.3.doc_count: 2 } + +--- +"Filter query w/ aggregation test": + - skip: + version: " - 2.99.99" + reason: Backport fix to 2.16 + + - do: + bulk: + refresh: true + index: range-agg-w-query + body: + - '{"index": {}}' + - '{"routing": "route1", "v": -10, "date": "2024-10-29"}' + - '{"index": {}}' + - '{"routing": "route1", "v": -5, "date": "2024-10-30"}' + - '{"index": {}}' + - '{"routing": "route1", "v": 10, "date": "2024-10-31"}' + - '{"index": {}}' + - '{"routing": "route2", "v": 15, "date": "2024-11-01"}' + - '{"index": {}}' + - '{"routing": "route2", "v": 20, "date": "2024-11-02"}' + + - do: + search: + index: range-agg-w-query + body: + query: + bool: + must: + match_all: {} + filter: + - terms: + routing: + - "route1" + aggregations: + NegPosAgg: + range: + field: v + keyed: true + ranges: + - to: 0 + key: "0" + - from: 0 + key: "1" + _source: false + + - match: { hits.total.value: 3 } + - match: { aggregations.NegPosAgg.buckets.0.doc_count: 2 } + - match: { aggregations.NegPosAgg.buckets.1.doc_count: 1 } + + - do: + search: + index: range-agg-w-query + body: + query: + bool: + must: + match_all: {} + filter: + - terms: + routing: + - "route1" + aggregations: + HalloweenAgg: + date_range: + field: date + format: "yyyy-MM-dd" + keyed: true + ranges: + - to: "2024-11-01" + key: "to-october" + - from: "2024-11-01" + key: "from-september" + _source: false + + - match: { hits.total.value: 3 } + - match: { aggregations.HalloweenAgg.buckets.to-october.doc_count: 3 } + - match: { aggregations.HalloweenAgg.buckets.from-september.doc_count: 0 } diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml new file mode 100644 index 0000000000000..d728070adb188 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml @@ -0,0 +1,184 @@ +--- +setup: + - skip: + version: " - 2.99.99" + reason: The bitmap filtering feature is available in 2.17 and later. + - do: + indices.create: + index: students + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + student_id: + type: integer + - do: + bulk: + refresh: true + body: + - { "index": { "_index": "students", "_id": "1" } } + - { "name": "Jane Doe", "student_id": 111 } + - { "index": { "_index": "students", "_id": "2" } } + - { "name": "Mary Major", "student_id": 222 } + - { "index": { "_index": "students", "_id": "3" } } + - { "name": "John Doe", "student_id": 333 } + - do: + indices.create: + index: classes + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + enrolled: + type: binary + store: true + - do: + bulk: + refresh: true + body: + - { "index": { "_index": "classes", "_id": "101" } } + - { "enrolled": "OjAAAAEAAAAAAAEAEAAAAG8A3gA=" } # 111,222 + - { "index": { "_index": "classes", "_id": "102" } } + - { "enrolled": "OjAAAAEAAAAAAAAAEAAAAG8A" } # 111 + - { "index": { "_index": "classes", "_id": "103" } } + - { "enrolled": "OjAAAAEAAAAAAAAAEAAAAE0B" } # 333 + - { "index": { "_index": "classes", "_id": "104" } } + - { "enrolled": "OjAAAAEAAAAAAAEAEAAAAN4ATQE=" } # 222,333 + - do: + cluster.health: + wait_for_status: green + +--- +"Terms lookup on a binary field with bitmap": + - do: + search: + rest_total_hits_as_int: true + index: students + body: { + "query": { + "terms": { + "student_id": { + "index": "classes", + "id": "101", + "path": "enrolled", + "store": true + }, + "value_type": "bitmap" + } + } + } + - match: { hits.total: 2 } + - match: { hits.hits.0._source.name: Jane Doe } + - match: { hits.hits.0._source.student_id: 111 } + - match: { hits.hits.1._source.name: Mary Major } + - match: { hits.hits.1._source.student_id: 222 } + +--- +"Terms query accepting bitmap as value": + - do: + search: + rest_total_hits_as_int: true + index: students + body: { + "query": { + "terms": { + "student_id": ["OjAAAAEAAAAAAAEAEAAAAG8A3gA="], + "value_type": "bitmap" + } + } + } + - match: { hits.total: 2 } + - match: { hits.hits.0._source.name: Jane Doe } + - match: { hits.hits.0._source.student_id: 111 } + - match: { hits.hits.1._source.name: Mary Major } + - match: { hits.hits.1._source.student_id: 222 } + +--- +"Boolean must bitmap filtering": + - do: + search: + rest_total_hits_as_int: true + index: students + body: { + "query": { + "bool": { + "must": [ + { + "terms": { + "student_id": { + "index": "classes", + "id": "101", + "path": "enrolled", + "store": true + }, + "value_type": "bitmap" + } + } + ], + "must_not": [ + { + "terms": { + "student_id": { + "index": "classes", + "id": "104", + "path": "enrolled", + "store": true + }, + "value_type": "bitmap" + } + } + ] + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._source.name: Jane Doe } + - match: { hits.hits.0._source.student_id: 111 } + +--- +"Boolean should bitmap filtering": + - do: + search: + rest_total_hits_as_int: true + index: students + body: { + "query": { + "bool": { + "should": [ + { + "terms": { + "student_id": { + "index": "classes", + "id": "101", + "path": 
"enrolled", + "store": true + }, + "value_type": "bitmap" + } + }, + { + "terms": { + "student_id": { + "index": "classes", + "id": "104", + "path": "enrolled", + "store": true + }, + "value_type": "bitmap" + } + } + ] + } + } + } + - match: { hits.total: 3 } + - match: { hits.hits.0._source.name: Mary Major } + - match: { hits.hits.0._source.student_id: 222 } + - match: { hits.hits.1._source.name: Jane Doe } + - match: { hits.hits.1._source.student_id: 111 } + - match: { hits.hits.2._source.name: John Doe } + - match: { hits.hits.2._source.student_id: 333 } diff --git a/server/build.gradle b/server/build.gradle index 429af5d0ac258..0cc42ad690eab 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -68,6 +68,7 @@ dependencies { api project(':libs:opensearch-x-content') api project(":libs:opensearch-geo") api project(":libs:opensearch-telemetry") + api project(":libs:opensearch-task-commons") compileOnly project(':libs:opensearch-plugin-classloader') @@ -125,6 +126,9 @@ dependencies { api "com.google.protobuf:protobuf-java:${versions.protobuf}" api "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}" + // https://mvnrepository.com/artifact/org.roaringbitmap/RoaringBitmap + implementation 'org.roaringbitmap:RoaringBitmap:1.2.1' + testImplementation(project(":test:framework")) { // tests use the locally compiled version of server exclude group: 'org.opensearch', module: 'server' diff --git a/server/licenses/RoaringBitmap-1.2.1.jar.sha1 b/server/licenses/RoaringBitmap-1.2.1.jar.sha1 new file mode 100644 index 0000000000000..ef8cd48c7a388 --- /dev/null +++ b/server/licenses/RoaringBitmap-1.2.1.jar.sha1 @@ -0,0 +1 @@ +828eb489b5e8c8762f2471010e9c7f20c7de596d \ No newline at end of file diff --git a/server/licenses/RoaringBitmap-LICENSE.txt b/server/licenses/RoaringBitmap-LICENSE.txt new file mode 100644 index 0000000000000..3bdd0036295a6 --- /dev/null +++ b/server/licenses/RoaringBitmap-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2013-2016 the RoaringBitmap authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
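The RoaringBitmap dependency added to server/build.gradle above is what backs the bitmap filtering tests earlier in this diff: clients ship a serialized, base64-encoded bitmap of numeric IDs. A sketch of producing such a payload with the library, assuming the standard portable Roaring serialization format is what the server expects (exact bytes can differ with container choices):

    import org.roaringbitmap.RoaringBitmap;

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Base64;

    public final class BitmapEncode {
        // Serialize a bitmap of numeric IDs and base64-encode it for use in a terms query.
        public static String encode(int... ids) throws IOException {
            RoaringBitmap bitmap = RoaringBitmap.bitmapOf(ids);
            bitmap.runOptimize(); // may shrink the payload; also changes the exact bytes
            ByteArrayOutputStream out = new ByteArrayOutputStream(bitmap.serializedSizeInBytes());
            bitmap.serialize(new DataOutputStream(out));
            return Base64.getEncoder().encodeToString(out.toByteArray());
        }

        public static void main(String[] args) throws IOException {
            // For {111, 222}; the tests above use "OjAAAAEAAAAAAAEAEAAAAG8A3gA=" for the same set.
            System.out.println(encode(111, 222));
        }
    }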
diff --git a/server/licenses/RoaringBitmap-NOTICE.txt b/server/licenses/RoaringBitmap-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/server/licenses/reactor-core-3.5.19.jar.sha1 b/server/licenses/reactor-core-3.5.19.jar.sha1 deleted file mode 100644 index 04b59d2faae04..0000000000000 --- a/server/licenses/reactor-core-3.5.19.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1d49ce1d0df79f28d3927da5f4c46a895b94335f \ No newline at end of file diff --git a/server/licenses/reactor-core-3.5.20.jar.sha1 b/server/licenses/reactor-core-3.5.20.jar.sha1 new file mode 100644 index 0000000000000..0c80be89f66c8 --- /dev/null +++ b/server/licenses/reactor-core-3.5.20.jar.sha1 @@ -0,0 +1 @@ +1fc0f91e2b93778a974339d2c24363d7f34f90b4 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java index acbd68fff6dd0..009f5111078de 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java @@ -79,7 +79,7 @@ protected boolean forbidPrivateIndexSettings() { @Before public void setup() { - asyncUploadMockFsRepo = true; + asyncUploadMockFsRepo = false; } public void testCreateCloneIndex() { @@ -153,6 +153,7 @@ public void testCreateCloneIndex() { } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/15056") public void testCreateCloneIndexLowPriorityRateLimit() { Version version = VersionUtils.randomIndexCompatibleVersion(random()); int numPrimaryShards = 1; @@ -280,7 +281,7 @@ public void testCreateCloneIndexFailure() throws ExecutionException, Interrupted throw new RuntimeException(e); } finally { setFailRate(REPOSITORY_NAME, 0); - ensureGreen(); + ensureGreen(TimeValue.timeValueSeconds(40)); // clean up client().admin() .cluster() diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java index dc3c8793a93f6..928c9e33e19cb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java @@ -46,6 +46,7 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -69,12 +70,15 @@ import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.io.UncheckedIOException; import java.util.Arrays; import java.util.HashSet; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; import java.util.stream.IntStream; @@ -89,12 +93,32 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteSplitIndexIT extends RemoteStoreBaseIntegTestCase { 
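The cleanup hook added just below polls the translog repository until all files are gone; it leans on the getFileCount helper whose signature is tightened later in this diff (throws Exception narrowed to IOException). For reference, a self-contained sketch of that file counter, mirroring the walkFileTree body shown further down:

    import java.io.IOException;
    import java.nio.file.FileVisitResult;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.SimpleFileVisitor;
    import java.nio.file.attribute.BasicFileAttributes;
    import java.util.concurrent.atomic.AtomicInteger;

    final class FileCount {
        // Count regular files under root by walking the directory tree.
        static int count(Path root) throws IOException {
            AtomicInteger files = new AtomicInteger(0);
            Files.walkFileTree(root, new SimpleFileVisitor<>() {
                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
                    files.incrementAndGet();
                    return FileVisitResult.CONTINUE;
                }
            });
            return files.get();
        }
    }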
+ @Before + public void setup() { + asyncUploadMockFsRepo = false; + } @Override protected boolean forbidPrivateIndexSettings() { return false; } + @After + public void cleanUp() throws Exception { + // Delete is async. + assertAcked( + client().admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN).get() + ); + assertBusy(() -> { + try { + assertEquals(0, getFileCount(translogRepoPath)); + } catch (IOException e) { + fail(); + } + }, 30, TimeUnit.SECONDS); + super.teardown(); + } + public Settings indexSettings() { return Settings.builder() .put(super.indexSettings()) diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index 4085cc3890f30..bcf23a37c0010 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -57,6 +57,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.opensearch.cluster.routing.allocation.AllocationDecision; import org.opensearch.cluster.routing.allocation.ExistingShardsAllocator; import org.opensearch.cluster.service.ClusterService; @@ -797,11 +798,26 @@ public void testBatchModeEnabledWithoutTimeout() throws Exception { ); assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings())); assertEquals(1, gatewayAllocator.getNumberOfStartedShardBatches()); - assertEquals(1, gatewayAllocator.getNumberOfStoreShardBatches()); + // Replica shard would be marked ineligible since there are no data nodes. 
+ // It would then be removed from any batch and batches would get deleted, so we would have 0 replica batches + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); - // Now start both data nodes and ensure batch mode is working - logger.info("--> restarting the stopped nodes"); + // Now start one data node + logger.info("--> restarting the first stopped node"); internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(0)).put(node0DataPathSettings).build()); + ensureStableCluster(2); + ensureYellow("test"); + assertEquals(0, gatewayAllocator.getNumberOfStartedShardBatches()); + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); + assertEquals(0, gatewayAllocator.getNumberOfInFlightFetches()); + + // calling reroute and asserting on reroute response + logger.info("--> calling reroute while cluster is yellow"); + clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + assertTrue(clusterRerouteResponse.isAcknowledged()); + + // Now start last data node and ensure batch mode is working and cluster goes green + logger.info("--> restarting the second stopped node"); internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(1)).put(node1DataPathSettings).build()); ensureStableCluster(3); ensureGreen("test"); @@ -842,11 +858,26 @@ public void testBatchModeEnabledWithSufficientTimeoutAndClusterGreen() throws Ex ); assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings())); assertEquals(1, gatewayAllocator.getNumberOfStartedShardBatches()); - assertEquals(1, gatewayAllocator.getNumberOfStoreShardBatches()); + // Replica shard would be marked ineligible since there are no data nodes. 
+        // It would then be removed from any batch and batches would get deleted, so we would have 0 replica batches
+        assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches());

-        // Now start both data nodes and ensure batch mode is working
-        logger.info("--> restarting the stopped nodes");
+        // Now start one data node and ensure batch mode is working
+        logger.info("--> restarting the first stopped node");
         internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(0)).put(node0DataPathSettings).build());
+        ensureStableCluster(2);
+        ensureYellow("test");
+        assertEquals(0, gatewayAllocator.getNumberOfStartedShardBatches());
+        assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches());
+        assertEquals(0, gatewayAllocator.getNumberOfInFlightFetches());
+
+        // calling reroute and asserting on reroute response
+        logger.info("--> calling reroute while cluster is yellow");
+        clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get();
+        assertTrue(clusterRerouteResponse.isAcknowledged());
+
+        // Now start the last data node and ensure batch mode is working and cluster goes green
+        logger.info("--> restarting the second stopped node");
         internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(1)).put(node1DataPathSettings).build());
         ensureStableCluster(3);
         ensureGreen("test");
@@ -855,7 +886,7 @@ public void testBatchModeEnabledWithSufficientTimeoutAndClusterGreen() throws Ex
         assertEquals(0, gatewayAllocator.getNumberOfInFlightFetches());
     }

-    public void testBatchModeEnabledWithInSufficientTimeoutButClusterGreen() throws Exception {
+    public void testBatchModeEnabledWithDisabledTimeoutAndClusterGreen() throws Exception {
         internalCluster().startClusterManagerOnlyNodes(
             1,
@@ -889,8 +920,8 @@
                 .put("node.name", clusterManagerName)
                 .put(clusterManagerDataPathSettings)
                 .put(ShardsBatchGatewayAllocator.GATEWAY_ALLOCATOR_BATCH_SIZE.getKey(), 5)
-                .put(ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "10ms")
-                .put(ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "10ms")
+                .put(ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "-1")
+                .put(ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "-1")
                 .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), true)
                 .build()
         );
@@ -907,7 +938,9 @@
         assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings()));
         assertEquals(10, gatewayAllocator.getNumberOfStartedShardBatches());
-        assertEquals(10, gatewayAllocator.getNumberOfStoreShardBatches());
+        // All replica shards would be marked ineligible since there are no data nodes.
+ // They would then be removed from any batch and batches would get deleted, so we would have 0 replica batches + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); health = client(internalCluster().getClusterManagerName()).admin().cluster().health(Requests.clusterHealthRequest()).actionGet(); assertFalse(health.isTimedOut()); assertEquals(RED, health.getStatus()); @@ -1051,6 +1084,18 @@ public void testMultipleReplicaShardAssignmentWithDelayedAllocationAndDifferentN ensureGreen("test"); } + public void testAllocationExplainReturnsNoWhenExtraReplicaShardInNonBatchMode() throws Exception { + // Non batch mode - This test is to validate that we don't return AWAITING_INFO in allocation explain API when the deciders are + // returning NO + this.allocationExplainReturnsNoWhenExtraReplicaShard(false); + } + + public void testAllocationExplainReturnsNoWhenExtraReplicaShardInBatchMode() throws Exception { + // Batch mode - This test is to validate that we don't return AWAITING_INFO in allocation explain API when the deciders are + // returning NO + this.allocationExplainReturnsNoWhenExtraReplicaShard(true); + } + public void testNBatchesCreationAndAssignment() throws Exception { // we will reduce batch size to 5 to make sure we have enough batches to test assignment // Total number of primary shards = 50 (50 indices*1) @@ -1104,7 +1149,9 @@ public void testNBatchesCreationAndAssignment() throws Exception { ); assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings())); assertEquals(10, gatewayAllocator.getNumberOfStartedShardBatches()); - assertEquals(10, gatewayAllocator.getNumberOfStoreShardBatches()); + // All replica shards would be marked ineligible since there are no data nodes. + // They would then be removed from any batch and batches would get deleted, so we would have 0 replica batches + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); health = client(internalCluster().getClusterManagerName()).admin().cluster().health(Requests.clusterHealthRequest()).actionGet(); assertFalse(health.isTimedOut()); assertEquals(RED, health.getStatus()); @@ -1193,7 +1240,9 @@ public void testCulpritShardInBatch() throws Exception { ); assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings())); assertEquals(1, gatewayAllocator.getNumberOfStartedShardBatches()); - assertEquals(1, gatewayAllocator.getNumberOfStoreShardBatches()); + // Replica shard would be marked ineligible since there are no data nodes. 
+        // It would then be removed from any batch and batches would get deleted, so we would have 0 replica batches
+        assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches());
         assertTrue(clusterRerouteResponse.isAcknowledged());
         health = client(internalCluster().getClusterManagerName()).admin().cluster().health(Requests.clusterHealthRequest()).actionGet();
         assertFalse(health.isTimedOut());
@@ -1511,4 +1560,97 @@ private List<String> findNodesWithShard(final boolean primary) {
         Collections.shuffle(requiredStartedShards, random());
         return requiredStartedShards.stream().map(shard -> state.nodes().get(shard.currentNodeId()).getName()).collect(Collectors.toList());
     }
+
+    private void allocationExplainReturnsNoWhenExtraReplicaShard(boolean batchModeEnabled) throws Exception {
+        internalCluster().startClusterManagerOnlyNodes(
+            1,
+            Settings.builder().put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), batchModeEnabled).build()
+        );
+        internalCluster().startDataOnlyNodes(5);
+        createIndex(
+            "test",
+            Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 4).build()
+        );
+        ensureGreen("test");
+        ensureStableCluster(6);
+
+        // Stop one of the nodes to make the cluster yellow
+        // We cannot directly create an index with replica = data node count because then the whole flow will get skipped due to
+        // INDEX_CREATED
+        List<String> nodesWithReplicaShards = findNodesWithShard(false);
+        Settings replicaNodeDataPathSettings = internalCluster().dataPathSettings(nodesWithReplicaShards.get(0));
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodesWithReplicaShards.get(0)));
+
+        ensureStableCluster(5);
+        ensureYellow("test");
+
+        logger.info("--> calling allocation explain API");
+        // shard should have decision NO because there is no valid node for the extra replica to go to
+        AllocateUnassignedDecision aud = client().admin()
+            .cluster()
+            .prepareAllocationExplain()
+            .setIndex("test")
+            .setShard(0)
+            .setPrimary(false)
+            .get()
+            .getExplanation()
+            .getShardAllocationDecision()
+            .getAllocateDecision();
+
+        assertEquals(AllocationDecision.NO, aud.getAllocationDecision());
+        assertEquals("cannot allocate because allocation is not permitted to any of the nodes", aud.getExplanation());
+
+        // Now creating a new index with too many replicas and trying again
+        createIndex(
+            "test2",
+            Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 5).build()
+        );
+
+        ensureYellowAndNoInitializingShards("test2");
+
+        logger.info("--> calling allocation explain API again");
+        // shard should have decision NO because there are 6 replicas and 4 data nodes
+        aud = client().admin()
+            .cluster()
+ .prepareAllocationExplain() + .setIndex("test2") + .setShard(0) + .setPrimary(false) + .get() + .getExplanation() + .getShardAllocationDecision() + .getAllocateDecision(); + + assertEquals(AllocationDecision.NO, aud.getAllocationDecision()); + assertEquals("cannot allocate because allocation is not permitted to any of the nodes", aud.getExplanation()); + + internalCluster().startDataOnlyNodes(1); + + ensureStableCluster(7); + ensureGreen("test2"); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java new file mode 100644 index 0000000000000..6a2e7ce4957ae --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java @@ -0,0 +1,173 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.client.Client; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.fs.ReloadableFsRepository; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Locale; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.DISCOVERY_NODES; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.model.RemoteClusterBlocks.CLUSTER_BLOCKS; +import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_METADATA; +import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + 
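The test class that follows verifies which cluster-state component files land in the remote store. Its getMetadataFiles helper (defined at the bottom of the new file) reduces blob names to a count per component prefix; in isolation, that computation looks like this sketch (plain strings in place of the repository listing, and "__" assumed here as the delimiter):

    import java.util.List;
    import java.util.Map;
    import java.util.function.Function;
    import java.util.stream.Collectors;

    final class BlobHistogram {
        // Count blobs per component: take the token before the delimiter, then sum occurrences.
        static Map<String, Integer> byComponent(List<String> blobNames, String delimiter) {
            return blobNames.stream()
                .map(name -> name.split(delimiter)[0])
                .collect(Collectors.toMap(Function.identity(), key -> 1, Integer::sum));
        }
    }

For example, byComponent(List.of("cluster_blocks__2__a", "discovery_nodes__2__b", "cluster_blocks__3__c"), "__") yields {cluster_blocks=2, discovery_nodes=1}, which is the shape the containsKey assertions below rely on.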
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+public class RemoteStatePublicationIT extends RemoteStoreBaseIntegTestCase {
+
+    private static String INDEX_NAME = "test-index";
+    private boolean isRemoteStateEnabled = true;
+    private String isRemotePublicationEnabled = "true";
+
+    @Before
+    public void setup() {
+        asyncUploadMockFsRepo = false;
+        isRemoteStateEnabled = true;
+        isRemotePublicationEnabled = "true";
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder()
+            .put(super.featureFlagSettings())
+            .put(FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL, isRemotePublicationEnabled)
+            .build();
+    }
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        String routingTableRepoName = "remote-routing-repo";
+        String routingTableRepoTypeAttributeKey = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT,
+            routingTableRepoName
+        );
+        String routingTableRepoSettingsAttributeKeyPrefix = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX,
+            routingTableRepoName
+        );
+        return Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal))
+            .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), isRemoteStateEnabled)
+            .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, routingTableRepoName)
+            .put(routingTableRepoTypeAttributeKey, ReloadableFsRepository.TYPE)
+            .put(routingTableRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath)
+            .build();
+    }
+
+    public void testPublication() throws Exception {
+        // create cluster with multi node (3 master + 2 data)
+        prepareCluster(3, 2, INDEX_NAME, 1, 2);
+        ensureStableCluster(5);
+        ensureGreen(INDEX_NAME);
+        // update settings on a random node
+        assertAcked(
+            internalCluster().client()
+                .admin()
+                .cluster()
+                .updateSettings(
+                    new ClusterUpdateSettingsRequest().persistentSettings(
+                        Settings.builder().put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "10mb").build()
+                    )
+                )
+                .actionGet()
+        );
+
+        RepositoriesService repositoriesService = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class);
+        BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(REPOSITORY_NAME);
+
+        Map<String, Integer> globalMetadataFiles = getMetadataFiles(repository, RemoteClusterStateUtils.GLOBAL_METADATA_PATH_TOKEN);
+
+        assertTrue(globalMetadataFiles.containsKey(COORDINATION_METADATA));
+        assertTrue(globalMetadataFiles.containsKey(SETTING_METADATA));
+        assertTrue(globalMetadataFiles.containsKey(TRANSIENT_SETTING_METADATA));
+        assertTrue(globalMetadataFiles.containsKey(TEMPLATES_METADATA));
+        assertTrue(globalMetadataFiles.keySet().stream().anyMatch(key -> key.startsWith(CUSTOM_METADATA)));
+
+        Map<String, Integer> ephemeralMetadataFiles = getMetadataFiles(
+            repository,
+            RemoteClusterStateUtils.CLUSTER_STATE_EPHEMERAL_PATH_TOKEN
+        );
+
+        assertTrue(ephemeralMetadataFiles.containsKey(CLUSTER_BLOCKS));
+        assertTrue(ephemeralMetadataFiles.containsKey(DISCOVERY_NODES));
+
+        Map<String, Integer> manifestFiles = getMetadataFiles(repository, RemoteClusterMetadataManifest.MANIFEST);
+        assertTrue(manifestFiles.containsKey(RemoteClusterMetadataManifest.MANIFEST));
+
+        // get settings from each node and verify that it is updated
+        Settings settings = clusterService().getSettings();
+        logger.info("settings : {}", settings);
+        for (Client client : clients()) {
+            ClusterStateResponse response = client.admin().cluster().prepareState().clear().setMetadata(true).get();
+            String
refreshSetting = response.getState()
+                .metadata()
+                .settings()
+                .get(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey());
+            assertEquals("10mb", refreshSetting);
+        }
+    }
+
+    public void testRemotePublicationDisableIfRemoteStateDisabled() {
+        // only disable remote state
+        isRemoteStateEnabled = false;
+        // create a multi-node cluster with inconsistent settings
+        prepareCluster(3, 2, INDEX_NAME, 1, 2);
+        // assert cluster is stable, ensuring publication falls back to legacy transport with inconsistent settings
+        ensureStableCluster(5);
+        ensureGreen(INDEX_NAME);
+
+        assertNull(internalCluster().getCurrentClusterManagerNodeInstance(RemoteClusterStateService.class));
+    }
+
+    private Map<String, Integer> getMetadataFiles(BlobStoreRepository repository, String subDirectory) throws IOException {
+        BlobPath metadataPath = repository.basePath()
+            .add(
+                Base64.getUrlEncoder()
+                    .withoutPadding()
+                    .encodeToString(getClusterState().getClusterName().value().getBytes(StandardCharsets.UTF_8))
+            )
+            .add(RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN)
+            .add(getClusterState().metadata().clusterUUID())
+            .add(subDirectory);
+        return repository.blobStore().blobContainer(metadataPath).listBlobs().keySet().stream().map(fileName -> {
+            logger.info(fileName);
+            return fileName.split(DELIMITER)[0];
+        }).collect(Collectors.toMap(Function.identity(), key -> 1, Integer::sum));
+    }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
index 8e5193b650868..52c6c6801a3c2 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
@@ -264,18 +264,19 @@ public void testValidCompositeIndex() {
         );
         assertEquals(expectedTimeUnits, dateDim.getIntervals());
         assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField());
+        assertEquals(2, starTreeFieldType.getMetrics().size());
         assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField());
-        List<MetricStat> expectedMetrics = Arrays.asList(
-            MetricStat.AVG,
-            MetricStat.COUNT,
-            MetricStat.SUM,
-            MetricStat.MAX,
-            MetricStat.MIN
-        );
+
+        // Assert default metrics
+        List<MetricStat> expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG);
         assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics());
+
+        assertEquals("_doc_count", starTreeFieldType.getMetrics().get(1).getField());
+        assertEquals(List.of(MetricStat.DOC_COUNT), starTreeFieldType.getMetrics().get(1).getMetrics());
+
         assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs());
         assertEquals(
-            StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP,
+            StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP,
             starTreeFieldType.getStarTreeConfig().getBuildMode()
         );
         assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims());
@@ -349,17 +350,13 @@ public void testUpdateIndexWhenMappingIsSame() {
         assertEquals(expectedTimeUnits, dateDim.getIntervals());
         assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField());
         assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField());
-        List<MetricStat> expectedMetrics = Arrays.asList(
-            MetricStat.AVG,
-            MetricStat.COUNT,
-            MetricStat.SUM,
-            MetricStat.MAX,
-            MetricStat.MIN
-        );
+
+        // Assert default metrics
+        List<MetricStat> expectedMetrics =
Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals( - StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP, + StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode() ); assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 09d5c208a8756..108ef14f0fcb4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -34,6 +34,12 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.Weight; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -56,7 +62,10 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; import org.opensearch.index.cache.request.RequestCacheStats; +import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; @@ -65,6 +74,7 @@ import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.time.ZoneId; @@ -768,6 +778,59 @@ public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception { assertTrue(stats.getMemorySizeInBytes() == 0); } + public void testTimedOutQuery() throws Exception { + // A timed out query should be cached and then invalidated + Client client = client(); + String index = "index"; + assertAcked( + client.admin() + .indices() + .prepareCreate(index) + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + // Disable index refreshing to avoid cache being invalidated mid-test + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1)) + ) + .get() + ); + indexRandom(true, client.prepareIndex(index).setSource("k", "hello")); + ensureSearchable(index); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + forceMerge(client, index); + + QueryBuilder timeoutQueryBuilder = new TermQueryBuilder("k", "hello") { + @Override + protected Query 
doToQuery(QueryShardContext context) { + return new TermQuery(new Term("k", "hello")) { + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + // Create the weight before sleeping. Otherwise, TermStates.build() (in the call to super.createWeight()) will + // sometimes throw an exception on timeout, rather than timing out gracefully. + Weight result = super.createWeight(searcher, scoreMode, boost); + try { + Thread.sleep(500); + } catch (InterruptedException ignored) {} + return result; + } + }; + } + }; + + SearchResponse resp = client.prepareSearch(index) + .setRequestCache(true) + .setQuery(timeoutQueryBuilder) + .setTimeout(TimeValue.ZERO) + .get(); + assertTrue(resp.isTimedOut()); + RequestCacheStats requestCacheStats = getRequestCacheStats(client, index); + // The cache should be empty as the timed-out query was invalidated + assertEquals(0, requestCacheStats.getMemorySizeInBytes()); + } + private Path[] shardDirectory(String server, Index index, int shard) { NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server); final Path[] paths = env.availableShardPaths(new ShardId(index, shard)); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java index 8fd7961cab3a7..7bdf33edf1534 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java @@ -298,8 +298,8 @@ public void testMustAcceptDataLossToImportDanglingIndex() throws Exception { * 1, then create two indices and delete them both while one node in * the cluster is stopped. The deletion of the second pushes the deletion * of the first out of the graveyard. 
When the stopped node is resumed, - * only the second index will be found into the graveyard and the the - * other will be considered dangling, and can therefore be listed and + * only the second index will be found into the graveyard and the other + * will be considered dangling, and can therefore be listed and * deleted through the API */ public void testDanglingIndexCanBeDeleted() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 63a9451a27a12..f83ae3e0ca820 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -313,7 +313,7 @@ public void assertRemoteStoreRepositoryOnAllNodes(String repositoryName) { } } - public static int getFileCount(Path path) throws Exception { + public static int getFileCount(Path path) throws IOException { final AtomicInteger filesExisting = new AtomicInteger(0); Files.walkFileTree(path, new SimpleFileVisitor<>() { @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 194dce5f4a57a..a327b683874f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -217,10 +217,15 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception { } else { // As delete is async its possible that the file gets created before the deletion or after // deletion. - MatcherAssert.assertThat( - actualFileCount, - is(oneOf(lastNMetadataFilesToKeep - 1, lastNMetadataFilesToKeep, lastNMetadataFilesToKeep + 1)) - ); + if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { + // With pinned timestamp, we also keep md files since last successful fetch + assertTrue(actualFileCount >= lastNMetadataFilesToKeep); + } else { + MatcherAssert.assertThat( + actualFileCount, + is(oneOf(lastNMetadataFilesToKeep - 1, lastNMetadataFilesToKeep, lastNMetadataFilesToKeep + 1)) + ); + } } }, 30, TimeUnit.SECONDS); } @@ -249,7 +254,12 @@ public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); int actualFileCount = getFileCount(indexPath); // We also allow (numberOfIterations + 1) as index creation also triggers refresh. 
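        // Context for the branch below: stale-commit deletion is asynchronous, and
        // when pinned timestamps are enabled, metadata files written since the last
        // successful fetch are also retained, so only a lower bound on the file
        // count is stable; with the feature disabled the count can be asserted exactly.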
- MatcherAssert.assertThat(actualFileCount, is(oneOf(4))); + if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { + // With pinned timestamp, we also keep md files since last successful fetch + assertTrue(actualFileCount >= 4); + } else { + assertEquals(4, actualFileCount); + } } public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java new file mode 100644 index 0000000000000..cb91c63e17245 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.Set; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStorePinnedTimestampsIT extends RemoteStoreBaseIntegTestCase { + static final String INDEX_NAME = "remote-store-test-idx-1"; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) + .build(); + } + + ActionListener noOpActionListener = new ActionListener<>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) {} + }; + + public void testTimestampPinUnpin() throws Exception { + prepareCluster(1, 1, INDEX_NAME, 0, 2); + ensureGreen(INDEX_NAME); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + primaryNodeName(INDEX_NAME) + ); + + Tuple> pinnedTimestampWithFetchTimestamp = RemoteStorePinnedTimestampService.getPinnedTimestamps(); + long lastFetchTimestamp = pinnedTimestampWithFetchTimestamp.v1(); + assertEquals(-1L, lastFetchTimestamp); + assertEquals(Set.of(), pinnedTimestampWithFetchTimestamp.v2()); + + assertThrows( + IllegalArgumentException.class, + () -> remoteStorePinnedTimestampService.pinTimestamp(1234L, "ss1", noOpActionListener) + ); + + long timestamp1 = System.currentTimeMillis() + 30000L; + long timestamp2 = System.currentTimeMillis() + 60000L; + long timestamp3 = System.currentTimeMillis() + 900000L; + remoteStorePinnedTimestampService.pinTimestamp(timestamp1, "ss2", noOpActionListener); + remoteStorePinnedTimestampService.pinTimestamp(timestamp2, "ss3", noOpActionListener); + remoteStorePinnedTimestampService.pinTimestamp(timestamp3, "ss4", noOpActionListener); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1)); + + assertBusy(() -> { + Tuple> pinnedTimestampWithFetchTimestamp_2 = RemoteStorePinnedTimestampService.getPinnedTimestamps(); + long lastFetchTimestamp_2 = 
pinnedTimestampWithFetchTimestamp_2.v1();
+            assertTrue(lastFetchTimestamp_2 != -1);
+            assertEquals(Set.of(timestamp1, timestamp2, timestamp3), pinnedTimestampWithFetchTimestamp_2.v2());
+        });
+
+        remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueMinutes(3));
+
+        // This should be a no-op as pinning entity is different
+        remoteStorePinnedTimestampService.unpinTimestamp(timestamp1, "no-snapshot", noOpActionListener);
+        // Unpinning already pinned entity
+        remoteStorePinnedTimestampService.unpinTimestamp(timestamp2, "ss3", noOpActionListener);
+        // Adding different entity to already pinned timestamp
+        remoteStorePinnedTimestampService.pinTimestamp(timestamp3, "ss5", noOpActionListener);
+
+        remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1));
+
+        assertBusy(() -> {
+            Tuple<Long, Set<Long>> pinnedTimestampWithFetchTimestamp_3 = RemoteStorePinnedTimestampService.getPinnedTimestamps();
+            long lastFetchTimestamp_3 = pinnedTimestampWithFetchTimestamp_3.v1();
+            assertTrue(lastFetchTimestamp_3 != -1);
+            assertEquals(Set.of(timestamp1, timestamp3), pinnedTimestampWithFetchTimestamp_3.v2());
+        });
+
+        remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueMinutes(3));
+    }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java
index 31c73e2fc03ae..86d586cd17146 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java
@@ -250,7 +250,7 @@ public void testStatsResponseFromLocalNode() {
         }
     }

-    @TestLogging(reason = "Getting trace logs from remote store package", value = "org.opensearch.remotestore:TRACE")
+    @TestLogging(reason = "Getting trace logs from remote store package", value = "org.opensearch.index.shard:TRACE")
     public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exception {
         setup();
         // Scenario:
@@ -280,11 +280,13 @@ public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exce
             .get(0)
             .getSegmentStats();
         logger.info(
-            "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started and {}b upload bytes failed.",
+            "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started, {}b upload bytes failed, {} uploads succeeded, {} upload bytes succeeded.",
             zeroStatePrimaryStats.refreshTimeLagMs,
             zeroStatePrimaryStats.bytesLag,
             zeroStatePrimaryStats.uploadBytesStarted,
-            zeroStatePrimaryStats.uploadBytesFailed
+            zeroStatePrimaryStats.uploadBytesFailed,
+            zeroStatePrimaryStats.totalUploadsSucceeded,
+            zeroStatePrimaryStats.uploadBytesSucceeded
         );
         assertTrue(
             zeroStatePrimaryStats.totalUploadsStarted == zeroStatePrimaryStats.totalUploadsSucceeded
@@ -348,7 +350,7 @@ public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exce
         }
     }

-    @TestLogging(reason = "Getting trace logs from remote store package", value = "org.opensearch.remotestore:TRACE")
+    @TestLogging(reason = "Getting trace logs from remote store package", value = "org.opensearch.index.shard:TRACE")
     public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() throws Exception {
         setup();
         // Scenario:
@@ -382,11 +384,13 @@ public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() thr
             .get(0)
             .getSegmentStats();
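        // The stats captured here describe a freshly created index with no documents,
        // so the assertion that follows expects the started and succeeded upload
        // counters to already agree.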
        logger.info(
-            "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started and {}b upload bytes failed.",
+            "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started, {}b upload bytes failed, {} uploads succeeded, {} upload bytes succeeded.",
             zeroStatePrimaryStats.refreshTimeLagMs,
             zeroStatePrimaryStats.bytesLag,
             zeroStatePrimaryStats.uploadBytesStarted,
-            zeroStatePrimaryStats.uploadBytesFailed
+            zeroStatePrimaryStats.uploadBytesFailed,
+            zeroStatePrimaryStats.totalUploadsSucceeded,
+            zeroStatePrimaryStats.uploadBytesSucceeded
         );
         assertTrue(
             zeroStatePrimaryStats.totalUploadsStarted == zeroStatePrimaryStats.totalUploadsSucceeded
@@ -617,7 +621,7 @@ public void testNonZeroPrimaryStatsOnNewlyCreatedIndexWithZeroDocs() throws Exce
             }
             assertZeroTranslogDownloadStats(translogStats);
         });
-        }, 5, TimeUnit.SECONDS);
+        }, 10, TimeUnit.SECONDS);
     }

     public void testStatsCorrectnessOnFailover() {
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java
index a51bd6b20fff0..88c9ae436e85f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java
@@ -20,6 +20,8 @@
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.settings.SettingsException;
 import org.opensearch.common.util.FeatureFlags;
+import org.opensearch.core.common.unit.ByteSizeUnit;
+import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.index.IndexModule;
 import org.opensearch.index.query.QueryBuilders;
 import org.opensearch.index.shard.IndexShard;
@@ -65,11 +67,20 @@ protected Settings featureFlagSettings() {
         return featureSettings.build();
     }

+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        ByteSizeValue cacheSize = new ByteSizeValue(16, ByteSizeUnit.GB);
+        return Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal))
+            .put(Node.NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), cacheSize.toString())
+            .build();
+    }
+
     public void testWritableWarmFeatureFlagDisabled() {
         Settings clusterSettings = Settings.builder().put(super.nodeSettings(0)).put(FeatureFlags.TIERED_REMOTE_INDEX, false).build();
         InternalTestCluster internalTestCluster = internalCluster();
         internalTestCluster.startClusterManagerOnlyNode(clusterSettings);
-        internalTestCluster.startDataOnlyNode(clusterSettings);
+        internalTestCluster.startDataAndSearchNodes(1);

         Settings indexSettings = Settings.builder()
             .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
@@ -94,7 +105,7 @@ public void testWritableWarmFeatureFlagDisabled() {
     public void testWritableWarmBasic() throws Exception {
         InternalTestCluster internalTestCluster = internalCluster();
         internalTestCluster.startClusterManagerOnlyNode();
-        internalTestCluster.startDataOnlyNode();
+        internalTestCluster.startDataAndSearchNodes(1);
         Settings settings = Settings.builder()
             .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java
index 01ad06757640c..3cf63e2f19a16 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java
@@ -52,6
+52,7 @@ import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.Fuzziness; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -66,6 +67,7 @@ import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.RangeQueryBuilder; import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.TermsQueryBuilder; import org.opensearch.index.query.WildcardQueryBuilder; import org.opensearch.index.query.WrapperQueryBuilder; import org.opensearch.index.query.functionscore.ScoreFunctionBuilders; @@ -84,6 +86,7 @@ import java.io.IOException; import java.io.Reader; +import java.nio.ByteBuffer; import java.time.Instant; import java.time.ZoneId; import java.time.ZoneOffset; @@ -98,6 +101,8 @@ import java.util.concurrent.ExecutionException; import java.util.regex.Pattern; +import org.roaringbitmap.RoaringBitmap; + import static java.util.Collections.singletonMap; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -1157,6 +1162,41 @@ public void testTermsQuery() throws Exception { assertHitCount(searchResponse, 0L); } + public void testTermsQueryWithBitmapDocValuesQuery() throws Exception { + assertAcked( + prepareCreate("products").setMapping( + jsonBuilder().startObject() + .startObject("properties") + .startObject("product") + .field("type", "integer") + .field("index", false) + .endObject() + .endObject() + .endObject() + ) + ); + indexRandom( + true, + client().prepareIndex("products").setId("1").setSource("product", 1), + client().prepareIndex("products").setId("2").setSource("product", 2), + client().prepareIndex("products").setId("3").setSource("product", new int[] { 1, 3 }), + client().prepareIndex("products").setId("4").setSource("product", 4) + ); + + RoaringBitmap r = new RoaringBitmap(); + r.add(1); + r.add(4); + byte[] array = new byte[r.serializedSizeInBytes()]; + r.serialize(ByteBuffer.wrap(array)); + BytesArray bitmap = new BytesArray(array); + // directly building the terms query builder, so pass in the bitmap value as BytesArray + SearchResponse searchResponse = client().prepareSearch("products") + .setQuery(constantScoreQuery(termsQuery("product", bitmap).valueType(TermsQueryBuilder.ValueType.BITMAP))) + .get(); + assertHitCount(searchResponse, 3L); + assertSearchHits(searchResponse, "1", "3", "4"); + } + public void testTermsLookupFilter() throws Exception { assertAcked(prepareCreate("lookup").setMapping("terms", "type=text", "other", "type=text")); indexRandomForConcurrentSearch("lookup"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index e40928f15e8a8..fdb12639c65be 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -42,6 +42,7 @@ import org.opensearch.action.bulk.BulkRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import 
org.opensearch.action.search.ShardSearchFailure; import org.opensearch.cluster.metadata.IndexMetadata; @@ -90,6 +91,7 @@ import static org.opensearch.script.MockScriptPlugin.NAME; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -919,7 +921,7 @@ public void testSortMissingNumbers() throws Exception { client().prepareIndex("test") .setId("3") .setSource( - jsonBuilder().startObject().field("id", "3").field("i_value", 2).field("d_value", 2.2).field("u_value", 2).endObject() + jsonBuilder().startObject().field("id", "3").field("i_value", 2).field("d_value", 2.2).field("u_value", 3).endObject() ) .get(); @@ -964,6 +966,18 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + logger.info("--> sort with custom missing value"); + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing(randomBoolean() ? 1 : "1")) + .get(); + assertNoFailures(searchResponse); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + // FLOAT logger.info("--> sort with no missing (same as missing _last)"); searchResponse = client().prepareSearch() @@ -1001,6 +1015,18 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + logger.info("--> sort with custom missing value"); + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("d_value").order(SortOrder.ASC).missing(randomBoolean() ? 1.1 : "1.1")) + .get(); + assertNoFailures(searchResponse); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + // UNSIGNED_LONG logger.info("--> sort with no missing (same as missing _last)"); searchResponse = client().prepareSearch() @@ -1037,6 +1063,24 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("2")); assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + + logger.info("--> sort with custom missing value"); + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("u_value").order(SortOrder.ASC).missing(randomBoolean() ? 
2 : "2")) + .get(); + assertNoFailures(searchResponse); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + + logger.info("--> sort with negative missing value"); + SearchRequestBuilder searchRequestBuilder = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("u_value").order(SortOrder.ASC).missing(randomBoolean() ? -1 : "-1")); + assertFailures(searchRequestBuilder, RestStatus.BAD_REQUEST, containsString("Value [-1] is out of range for an unsigned long")); } public void testSortMissingNumbersMinMax() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index e688a4491b1a7..2331d52c3a1bc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -19,6 +19,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.store.RemoteBufferedOutputDirectory; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -287,8 +288,14 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { public void testRemoteStoreCleanupForDeletedIndex() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); final Path remoteStoreRepoPath = randomRepoPath(); - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); - internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + Settings settings = remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath); + // Disabling pinned timestamp as this test is specifically for shallow snapshot. 
+        settings = Settings.builder()
+            .put(settings)
+            .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), false)
+            .build();
+        internalCluster().startClusterManagerOnlyNode(settings);
+        internalCluster().startDataOnlyNode(settings);
         final Client clusterManagerClient = internalCluster().clusterManagerClient();
         ensureStableCluster(2);
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java
index 1c199df4d548e..a19bbe49ad340 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java
@@ -67,7 +67,6 @@
 import java.util.stream.StreamSupport;

 import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS;
-import static org.opensearch.common.util.FeatureFlags.TIERED_REMOTE_INDEX;
 import static org.opensearch.core.common.util.CollectionUtils.iterableAsArrayList;
 import static org.opensearch.index.store.remote.filecache.FileCacheSettings.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING;
 import static org.opensearch.test.NodeRoles.clusterManagerOnlyNode;
@@ -1019,11 +1018,12 @@ public void testStartSearchNode() throws Exception {
         internalCluster().startNode(Settings.builder().put(onlyRole(DiscoveryNodeRole.SEARCH_ROLE)));
         // test start node without search role
         internalCluster().startNode(Settings.builder().put(onlyRole(DiscoveryNodeRole.DATA_ROLE)));
-        // test start non-dedicated search node with TIERED_REMOTE_INDEX feature enabled
-        internalCluster().startNode(
-            Settings.builder()
-                .put(onlyRoles(Set.of(DiscoveryNodeRole.SEARCH_ROLE, DiscoveryNodeRole.DATA_ROLE)))
-                .put(TIERED_REMOTE_INDEX, true)
+        // test start non-dedicated search node; it fails if the user doesn't configure the cache size
+        assertThrows(
+            SettingsException.class,
+            () -> internalCluster().startNode(
+                Settings.builder().put(onlyRoles(Set.of(DiscoveryNodeRole.SEARCH_ROLE, DiscoveryNodeRole.DATA_ROLE)))
+            )
         );
         // test start non-dedicated search node
         assertThrows(
diff --git a/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java
new file mode 100644
index 0000000000000..67ee45f4c9306
--- /dev/null
+++ b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java
@@ -0,0 +1,46 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.apache.lucene.codecs.lucene90;
+
+import org.apache.lucene.codecs.DocValuesConsumer;
+import org.apache.lucene.index.SegmentWriteState;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/**
+ * This class is an abstraction of the {@link DocValuesConsumer} for the Star Tree index structure.
+ * It is responsible for consuming various types of document values (numeric, binary, sorted, sorted numeric,
+ * and sorted set) for fields in the Star Tree index.
+ * + * @opensearch.experimental + */ +public class Lucene90DocValuesConsumerWrapper implements Closeable { + + private final Lucene90DocValuesConsumer lucene90DocValuesConsumer; + + public Lucene90DocValuesConsumerWrapper( + SegmentWriteState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + lucene90DocValuesConsumer = new Lucene90DocValuesConsumer(state, dataCodec, dataExtension, metaCodec, metaExtension); + } + + public Lucene90DocValuesConsumer getLucene90DocValuesConsumer() { + return lucene90DocValuesConsumer; + } + + @Override + public void close() throws IOException { + lucene90DocValuesConsumer.close(); + } +} diff --git a/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java new file mode 100644 index 0000000000000..a213852c59094 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.codecs.lucene90; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.SegmentReadState; + +import java.io.Closeable; +import java.io.IOException; + +/** + * This class is a custom abstraction of the {@link DocValuesProducer} for the Star Tree index structure. + * It is responsible for providing access to various types of document values (numeric, binary, sorted, sorted numeric, + * and sorted set) for fields in the Star Tree index. + * + * @opensearch.experimental + */ +public class Lucene90DocValuesProducerWrapper implements Closeable { + + private final Lucene90DocValuesProducer lucene90DocValuesProducer; + + public Lucene90DocValuesProducerWrapper( + SegmentReadState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + lucene90DocValuesProducer = new Lucene90DocValuesProducer(state, dataCodec, dataExtension, metaCodec, metaExtension); + } + + public DocValuesProducer getLucene90DocValuesProducer() { + return lucene90DocValuesProducer; + } + + @Override + public void close() throws IOException { + lucene90DocValuesProducer.close(); + } +} diff --git a/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java b/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java new file mode 100644 index 0000000000000..f7759fcced284 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.index; + +import org.apache.lucene.util.Counter; + +/** + * A wrapper class for writing sorted numeric doc values. + *
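+ * (Note: {@code SortedNumericDocValuesWriter} is package-private in Lucene, which is
+ * presumably why this wrapper lives in the {@code org.apache.lucene.index} package.)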
+ * <p>
+ * This class provides a convenient way to add sorted numeric doc values to a field + * and retrieve the corresponding {@link SortedNumericDocValues} instance. + * + * @opensearch.experimental + */ +public class SortedNumericDocValuesWriterWrapper { + + private final SortedNumericDocValuesWriter sortedNumericDocValuesWriter; + + /** + * Sole constructor. Constructs a new {@link SortedNumericDocValuesWriterWrapper} instance. + * + * @param fieldInfo the field information for the field being written + * @param counter a counter for tracking memory usage + */ + public SortedNumericDocValuesWriterWrapper(FieldInfo fieldInfo, Counter counter) { + sortedNumericDocValuesWriter = new SortedNumericDocValuesWriter(fieldInfo, counter); + } + + /** + * Adds a value to the sorted numeric doc values for the specified document. + * + * @param docID the document ID + * @param value the value to add + */ + public void addValue(int docID, long value) { + sortedNumericDocValuesWriter.addValue(docID, value); + } + + /** + * Returns the {@link SortedNumericDocValues} instance containing the sorted numeric doc values + * + * @return the {@link SortedNumericDocValues} instance + */ + public SortedNumericDocValues getDocValues() { + return sortedNumericDocValuesWriter.getDocValues(); + } +} diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 574b7029a6501..c86e6580122d5 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -1200,9 +1200,12 @@ public void unregisterDynamicRoute(NamedRoute route) { * @param route The {@link RestHandler.Route}. * @return the corresponding {@link RestSendToExtensionAction} if it is registered, null otherwise. 
*/ - @SuppressWarnings("unchecked") public RestSendToExtensionAction get(RestHandler.Route route) { - return routeRegistry.get(route); + if (route instanceof NamedRoute) { + return routeRegistry.get((NamedRoute) route); + } + // Only NamedRoutes are map keys so any other route is not in the map + return null; } } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java index 97ef94055faf7..d1d5f568fc09d 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -27,6 +27,7 @@ @PublicApi(since = "2.11.0") public final class SearchRequestStats extends SearchRequestOperationsListener { Map phaseStatsMap = new EnumMap<>(SearchPhaseName.class); + StatsHolder tookStatsHolder; public static final String SEARCH_REQUEST_STATS_ENABLED_KEY = "search.request_stats_enabled"; public static final Setting SEARCH_REQUEST_STATS_ENABLED = Setting.boolSetting( @@ -40,6 +41,7 @@ public final class SearchRequestStats extends SearchRequestOperationsListener { public SearchRequestStats(ClusterSettings clusterSettings) { this.setEnabled(clusterSettings.get(SEARCH_REQUEST_STATS_ENABLED)); clusterSettings.addSettingsUpdateConsumer(SEARCH_REQUEST_STATS_ENABLED, this::setEnabled); + tookStatsHolder = new StatsHolder(); for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { phaseStatsMap.put(searchPhaseName, new StatsHolder()); } @@ -57,6 +59,18 @@ public long getPhaseMetric(SearchPhaseName searchPhaseName) { return phaseStatsMap.get(searchPhaseName).timing.sum(); } + public long getTookCurrent() { + return tookStatsHolder.current.count(); + } + + public long getTookTotal() { + return tookStatsHolder.total.count(); + } + + public long getTookMetric() { + return tookStatsHolder.timing.sum(); + } + @Override protected void onPhaseStart(SearchPhaseContext context) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); @@ -75,6 +89,23 @@ protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); } + @Override + protected void onRequestStart(SearchRequestContext searchRequestContext) { + tookStatsHolder.current.inc(); + } + + @Override + protected void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + tookStatsHolder.current.dec(); + tookStatsHolder.total.inc(); + tookStatsHolder.timing.inc(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - searchRequestContext.getAbsoluteStartNanos())); + } + + @Override + protected void onRequestFailure(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + tookStatsHolder.current.dec(); + } + /** * Holder of statistics values * diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java index dfecf4f462c4d..ed2943db94420 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java @@ -37,8 +37,8 @@ import org.opensearch.core.tasks.TaskId; import org.opensearch.search.fetch.ShardFetchSearchRequest; import org.opensearch.search.internal.ShardSearchRequest; -import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.SearchBackpressureTask; +import 
org.opensearch.wlm.QueryGroupTask; import java.util.Map; import java.util.function.Supplier; @@ -50,7 +50,7 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public class SearchShardTask extends CancellableTask implements SearchBackpressureTask { +public class SearchShardTask extends QueryGroupTask implements SearchBackpressureTask { // generating metadata in a lazy way since source can be quite big private final MemoizedSupplier metadataSupplier; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTask.java b/server/src/main/java/org/opensearch/action/search/SearchTask.java index d3c1043c50cce..2a1a961e7607b 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTask.java @@ -35,8 +35,8 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.tasks.TaskId; -import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.SearchBackpressureTask; +import org.opensearch.wlm.QueryGroupTask; import java.util.Map; import java.util.function.Supplier; @@ -49,7 +49,7 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public class SearchTask extends CancellableTask implements SearchBackpressureTask { +public class SearchTask extends QueryGroupTask implements SearchBackpressureTask { // generating description in a lazy way since source can be quite big private final Supplier descriptionSupplier; private SearchProgressListener progressListener = SearchProgressListener.NOOP; diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 7d3237d43cd5c..88bf7ebea8e52 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -101,6 +101,7 @@ import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; +import org.opensearch.wlm.QueryGroupTask; import java.util.ArrayList; import java.util.Arrays; @@ -442,6 +443,12 @@ private void executeRequest( ); searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext); + // At this point either the QUERY_GROUP_ID header will be present in ThreadContext either via ActionFilter + // or HTTP header (HTTP header will be deprecated once ActionFilter is implemented) + if (task instanceof QueryGroupTask) { + ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); + } + PipelinedRequest searchRequest; ActionListener listener; try { diff --git a/server/src/main/java/org/opensearch/client/OriginSettingClient.java b/server/src/main/java/org/opensearch/client/OriginSettingClient.java index 1b0e08cc489c4..27d87227df7bc 100644 --- a/server/src/main/java/org/opensearch/client/OriginSettingClient.java +++ b/server/src/main/java/org/opensearch/client/OriginSettingClient.java @@ -36,6 +36,7 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.ContextPreservingActionListener; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; @@ -65,7 +66,11 @@ protected void ActionListener listener ) { final Supplier supplier = 
in().threadPool().getThreadContext().newRestorableContext(false); - try (ThreadContext.StoredContext ignore = in().threadPool().getThreadContext().stashWithOrigin(origin)) { + try ( + ThreadContext.StoredContext ignore = ThreadContextAccess.doPrivileged( + () -> in().threadPool().getThreadContext().stashWithOrigin(origin) + ) + ) { super.doExecute(action, request, new ContextPreservingActionListener<>(supplier, listener)); } } diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 6c6049f04231b..509cd732357d6 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -416,6 +416,7 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.bytes.BytesReference; @@ -2148,7 +2149,9 @@ protected void ActionListener listener ) { ThreadContext threadContext = threadPool().getThreadContext(); - try (ThreadContext.StoredContext ctx = threadContext.stashAndMergeHeaders(headers)) { + try ( + ThreadContext.StoredContext ctx = ThreadContextAccess.doPrivileged(() -> threadContext.stashAndMergeHeaders(headers)) + ) { super.doExecute(action, request, listener); } } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index 7fa63ae8abc62..c7820c2c9a365 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -105,7 +105,8 @@ public CoordinationState( .getLastAcceptedConfiguration(); this.publishVotes = new VoteCollection(); this.isRemoteStateEnabled = isRemoteStoreClusterStateEnabled(settings); - this.isRemotePublicationEnabled = FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) + this.isRemotePublicationEnabled = isRemoteStateEnabled + && FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) && localNode.isRemoteStatePublicationEnabled(); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java index 594dda83c41e2..63bbe4144c4fb 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java @@ -184,7 +184,7 @@ public ComposableIndexTemplate(StreamInput in) throws IOException { this.version = in.readOptionalVLong(); this.metadata = in.readMap(); this.dataStreamTemplate = in.readOptionalWriteable(DataStreamTemplate::new); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_16_0)) { this.context = in.readOptionalWriteable(Context::new); } else { this.context = null; @@ -248,7 +248,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVLong(this.version); out.writeMap(this.metadata); out.writeOptionalWriteable(dataStreamTemplate); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_16_0)) { 
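+        // Mirrors the readFrom gate above: the context field is only written to
+        // nodes on 2.16 or later, so older nodes never receive it on the wire.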
out.writeOptionalWriteable(context); } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 9e7fe23f29872..df0d2609ad83d 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -43,6 +43,7 @@ import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.node.DiscoveryNodeFilters; import org.opensearch.cluster.routing.allocation.IndexMetadataUpdater; +import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.MapBuilder; @@ -686,6 +687,8 @@ public static APIBlock readFrom(StreamInput input) throws IOException { private final boolean isSystem; private final boolean isRemoteSnapshot; + private final int indexTotalShardsPerNodeLimit; + private IndexMetadata( final Index index, final long version, @@ -711,7 +714,8 @@ private IndexMetadata( final int routingPartitionSize, final ActiveShardCount waitForActiveShards, final Map rolloverInfos, - final boolean isSystem + final boolean isSystem, + final int indexTotalShardsPerNodeLimit ) { this.index = index; @@ -746,6 +750,7 @@ private IndexMetadata( this.rolloverInfos = Collections.unmodifiableMap(rolloverInfos); this.isSystem = isSystem; this.isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); + this.indexTotalShardsPerNodeLimit = indexTotalShardsPerNodeLimit; assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } @@ -899,6 +904,10 @@ public Set inSyncAllocationIds(int shardId) { return inSyncAllocationIds.get(shardId); } + public int getIndexTotalShardsPerNodeLimit() { + return this.indexTotalShardsPerNodeLimit; + } + @Nullable public DiscoveryNodeFilters requireFilters() { return requireFilters; @@ -1583,6 +1592,8 @@ public IndexMetadata build() { ); } + final int indexTotalShardsPerNodeLimit = ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(settings); + final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); return new IndexMetadata( @@ -1610,7 +1621,8 @@ public IndexMetadata build() { routingPartitionSize, waitForActiveShards, rolloverInfos, - isSystem + isSystem, + indexTotalShardsPerNodeLimit ); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 440b9e267cf0a..6163fd624c838 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -853,6 +853,12 @@ public Map views() { return Optional.ofNullable((ViewMetadata) this.custom(ViewMetadata.TYPE)).map(ViewMetadata::views).orElse(Collections.emptyMap()); } + public Map queryGroups() { + return Optional.ofNullable((QueryGroupMetadata) this.custom(QueryGroupMetadata.TYPE)) + .map(QueryGroupMetadata::queryGroups) + .orElse(Collections.emptyMap()); + } + public DecommissionAttributeMetadata decommissionAttributeMetadata() { return custom(DecommissionAttributeMetadata.TYPE); } @@ -1391,6 +1397,13 @@ public Builder put(final QueryGroup queryGroup) { return queryGroups(existing); } + public Builder remove(final QueryGroup queryGroup) { + Objects.requireNonNull(queryGroup, 
"queryGroup should not be null"); + Map existing = new HashMap<>(getQueryGroups()); + existing.remove(queryGroup.get_id()); + return queryGroups(existing); + } + private Map getQueryGroups() { return Optional.ofNullable(this.customs.get(QueryGroupMetadata.TYPE)) .map(o -> (QueryGroupMetadata) o) @@ -1824,9 +1837,7 @@ static void validateDataStreams(SortedMap indicesLooku if (dsMetadata != null) { for (DataStream ds : dsMetadata.dataStreams().values()) { String prefix = DataStream.BACKING_INDEX_PREFIX + ds.getName() + "-"; - Set conflicts = indicesLookup.subMap(prefix, DataStream.BACKING_INDEX_PREFIX + ds.getName() + ".") // '.' is the - // char after - // '-' + Set conflicts = indicesLookup.subMap(prefix, DataStream.BACKING_INDEX_PREFIX + ds.getName() + ".") .keySet() .stream() .filter(s -> NUMBER_PATTERN.matcher(s.substring(prefix.length())).matches()) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 7bc3d279513cd..6b638c9920c27 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -944,7 +944,7 @@ static ClusterState innerRemoveIndexTemplateV2(ClusterState currentState, String static Set dataStreamsUsingTemplate(final ClusterState state, final String templateName) { final ComposableIndexTemplate template = state.metadata().templatesV2().get(templateName); - if (template == null) { + if (template == null || template.getDataStreamTemplate() == null) { return Collections.emptySet(); } final Set dataStreams = state.metadata().dataStreams().keySet(); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java index beaab198073df..9b5c6bc2369a6 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java @@ -29,38 +29,40 @@ * Class to define the QueryGroup schema * { * "_id": "fafjafjkaf9ag8a9ga9g7ag0aagaga", - * "resourceLimits": { - * "jvm": 0.4 + * "resource_limits": { + * "memory": 0.4 * }, * "resiliency_mode": "enforced", * "name": "analytics", - * "updatedAt": 4513232415 + * "updated_at": 4513232415 * } */ @ExperimentalApi public class QueryGroup extends AbstractDiffable implements ToXContentObject { + public static final String _ID_STRING = "_id"; + public static final String NAME_STRING = "name"; + public static final String RESILIENCY_MODE_STRING = "resiliency_mode"; + public static final String UPDATED_AT_STRING = "updated_at"; + public static final String RESOURCE_LIMITS_STRING = "resource_limits"; private static final int MAX_CHARS_ALLOWED_IN_NAME = 50; private final String name; private final String _id; private final ResiliencyMode resiliencyMode; // It is an epoch in millis private final long updatedAtInMillis; - private final Map resourceLimits; + private final Map resourceLimits; - public QueryGroup(String name, ResiliencyMode resiliencyMode, Map resourceLimits) { + public QueryGroup(String name, ResiliencyMode resiliencyMode, Map resourceLimits) { this(name, UUIDs.randomBase64UUID(), resiliencyMode, resourceLimits, Instant.now().getMillis()); } - public QueryGroup(String name, String _id, ResiliencyMode resiliencyMode, Map resourceLimits, long updatedAt) { + public QueryGroup(String name, String _id, 
ResiliencyMode resiliencyMode, Map resourceLimits, long updatedAt) { Objects.requireNonNull(name, "QueryGroup.name can't be null"); Objects.requireNonNull(resourceLimits, "QueryGroup.resourceLimits can't be null"); Objects.requireNonNull(resiliencyMode, "QueryGroup.resiliencyMode can't be null"); Objects.requireNonNull(_id, "QueryGroup._id can't be null"); - - if (name.length() > MAX_CHARS_ALLOWED_IN_NAME) { - throw new IllegalArgumentException("QueryGroup.name shouldn't be more than 50 chars long"); - } + validateName(name); if (resourceLimits.isEmpty()) { throw new IllegalArgumentException("QueryGroup.resourceLimits should at least have 1 resource limit"); @@ -92,7 +94,7 @@ public QueryGroup(StreamInput in) throws IOException { in.readString(), in.readString(), ResiliencyMode.fromName(in.readString()), - in.readMap((i) -> ResourceType.fromName(i.readString()), StreamInput::readGenericValue), + in.readMap((i) -> ResourceType.fromName(i.readString()), StreamInput::readDouble), in.readLong() ); } @@ -102,18 +104,24 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeString(_id); out.writeString(resiliencyMode.getName()); - out.writeMap(resourceLimits, ResourceType::writeTo, StreamOutput::writeGenericValue); + out.writeMap(resourceLimits, ResourceType::writeTo, StreamOutput::writeDouble); out.writeLong(updatedAtInMillis); } - private void validateResourceLimits(Map resourceLimits) { - for (Map.Entry resource : resourceLimits.entrySet()) { - Double threshold = (Double) resource.getValue(); + public static void validateName(String name) { + if (name == null || name.isEmpty() || name.length() > MAX_CHARS_ALLOWED_IN_NAME) { + throw new IllegalArgumentException("QueryGroup.name shouldn't be null, empty or more than 50 chars long"); + } + } + + private void validateResourceLimits(Map resourceLimits) { + for (Map.Entry resource : resourceLimits.entrySet()) { + Double threshold = resource.getValue(); Objects.requireNonNull(resource.getKey(), "resourceName can't be null"); Objects.requireNonNull(threshold, "resource limit threshold for" + resource.getKey().getName() + " : can't be null"); - if (Double.compare(threshold, 1.0) > 0) { - throw new IllegalArgumentException("resource value should be less than 1.0"); + if (Double.compare(threshold, 0.0) <= 0 || Double.compare(threshold, 1.0) > 0) { + throw new IllegalArgumentException("resource value should be greater than 0 and less or equal to 1.0"); } } } @@ -121,12 +129,12 @@ private void validateResourceLimits(Map resourceLimits) { @Override public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { builder.startObject(); - builder.field("_id", _id); - builder.field("name", name); - builder.field("resiliency_mode", resiliencyMode.getName()); - builder.field("updatedAt", updatedAtInMillis); + builder.field(_ID_STRING, _id); + builder.field(NAME_STRING, name); + builder.field(RESILIENCY_MODE_STRING, resiliencyMode.getName()); + builder.field(UPDATED_AT_STRING, updatedAtInMillis); // write resource limits - builder.startObject("resourceLimits"); + builder.startObject(RESOURCE_LIMITS_STRING); for (ResourceType resourceType : ResourceType.values()) { if (resourceLimits.containsKey(resourceType)) { builder.field(resourceType.getName(), resourceLimits.get(resourceType)); @@ -139,56 +147,7 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa } public static QueryGroup fromXContent(final XContentParser parser) throws IOException { - if 
(parser.currentToken() == null) { // fresh parser? move to the first token - parser.nextToken(); - } - - Builder builder = builder(); - - XContentParser.Token token = parser.currentToken(); - - if (token != XContentParser.Token.START_OBJECT) { - throw new IllegalArgumentException("Expected START_OBJECT token but found [" + parser.currentName() + "]"); - } - - String fieldName = ""; - // Map to hold resources - final Map resourceLimits = new HashMap<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); - } else if (token.isValue()) { - if (fieldName.equals("_id")) { - builder._id(parser.text()); - } else if (fieldName.equals("name")) { - builder.name(parser.text()); - } else if (fieldName.equals("resiliency_mode")) { - builder.mode(parser.text()); - } else if (fieldName.equals("updatedAt")) { - builder.updatedAt(parser.longValue()); - } else { - throw new IllegalArgumentException(fieldName + " is not a valid field in QueryGroup"); - } - } else if (token == XContentParser.Token.START_OBJECT) { - - if (!fieldName.equals("resourceLimits")) { - throw new IllegalArgumentException( - "QueryGroup.resourceLimits is an object and expected token was { " + " but found " + token - ); - } - - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); - } else { - resourceLimits.put(ResourceType.fromName(fieldName), parser.doubleValue()); - } - } - - } - } - builder.resourceLimits(resourceLimits); - return builder.build(); + return Builder.fromXContent(parser).build(); } public static Diff readDiff(final StreamInput in) throws IOException { @@ -201,6 +160,7 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; QueryGroup that = (QueryGroup) o; return Objects.equals(name, that.name) + && Objects.equals(resiliencyMode, that.resiliencyMode) && Objects.equals(resourceLimits, that.resourceLimits) && Objects.equals(_id, that._id) && updatedAtInMillis == that.updatedAtInMillis; @@ -219,7 +179,7 @@ public ResiliencyMode getResiliencyMode() { return resiliencyMode; } - public Map getResourceLimits() { + public Map getResourceLimits() { return resourceLimits; } @@ -268,7 +228,6 @@ public static ResiliencyMode fromName(String s) { } throw new IllegalArgumentException("Invalid value for QueryGroupMode: " + s); } - } /** @@ -280,10 +239,62 @@ public static class Builder { private String _id; private ResiliencyMode resiliencyMode; private long updatedAt; - private Map resourceLimits; + private Map resourceLimits; private Builder() {} + public static Builder fromXContent(XContentParser parser) throws IOException { + if (parser.currentToken() == null) { // fresh parser? 
move to the first token + parser.nextToken(); + } + + Builder builder = builder(); + + XContentParser.Token token = parser.currentToken(); + + if (token != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException("Expected START_OBJECT token but found [" + parser.currentName() + "]"); + } + + String fieldName = ""; + // Map to hold resources + final Map resourceLimits = new HashMap<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else if (token.isValue()) { + if (fieldName.equals(_ID_STRING)) { + builder._id(parser.text()); + } else if (fieldName.equals(NAME_STRING)) { + builder.name(parser.text()); + } else if (fieldName.equals(RESILIENCY_MODE_STRING)) { + builder.mode(parser.text()); + } else if (fieldName.equals(UPDATED_AT_STRING)) { + builder.updatedAt(parser.longValue()); + } else { + throw new IllegalArgumentException(fieldName + " is not a valid field in QueryGroup"); + } + } else if (token == XContentParser.Token.START_OBJECT) { + + if (!fieldName.equals(RESOURCE_LIMITS_STRING)) { + throw new IllegalArgumentException( + "QueryGroup.resourceLimits is an object and expected token was { " + " but found " + token + ); + } + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else { + resourceLimits.put(ResourceType.fromName(fieldName), parser.doubleValue()); + } + } + + } + } + return builder.resourceLimits(resourceLimits); + } + public Builder name(String name) { this.name = name; return this; @@ -304,7 +315,7 @@ public Builder updatedAt(long updatedAt) { return this; } - public Builder resourceLimits(Map resourceLimits) { + public Builder resourceLimits(Map resourceLimits) { this.resourceLimits = resourceLimits; return this; } @@ -312,6 +323,5 @@ public Builder resourceLimits(Map resourceLimits) { public QueryGroup build() { return new QueryGroup(name, _id, resiliencyMode, resourceLimits, updatedAt); } - } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java index 6158461c7d4e9..6242247f34a93 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java @@ -42,8 +42,10 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.node.ResponseCollectorService; @@ -245,6 +247,13 @@ public GroupShardsIterator searchShards( preference = Preference.PRIMARY.type(); } + if (FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX) + && IndexModule.DataLocalityType.PARTIAL.name() + .equals(indexMetadataForShard.getSettings().get(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey())) + && (preference == null || preference.isEmpty())) { + preference = Preference.PRIMARY_FIRST.type(); + } + ShardIterator iterator = preferenceActiveShardIterator( shard, clusterState.nodes().getLocalNodeId(), diff --git 
a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index b2443490dd973..212583d1fb14f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -154,6 +154,13 @@ public class BalancedShardsAllocator implements ShardsAllocator { Property.NodeScope ); + public static final Setting IGNORE_THROTTLE_FOR_REMOTE_RESTORE = Setting.boolSetting( + "cluster.routing.allocation.remote_primary.ignore_throttle", + true, + Property.Dynamic, + Property.NodeScope + ); + public static final Setting PRIMARY_SHARD_REBALANCE_BUFFER = Setting.floatSetting( "cluster.routing.allocation.rebalance.primary.buffer", 0.10f, @@ -173,6 +180,8 @@ public class BalancedShardsAllocator implements ShardsAllocator { private volatile WeightFunction weightFunction; private volatile float threshold; + private volatile boolean ignoreThrottleInRestore; + public BalancedShardsAllocator(Settings settings) { this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); } @@ -182,6 +191,7 @@ public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSetting setShardBalanceFactor(SHARD_BALANCE_FACTOR_SETTING.get(settings)); setIndexBalanceFactor(INDEX_BALANCE_FACTOR_SETTING.get(settings)); setPreferPrimaryShardRebalanceBuffer(PRIMARY_SHARD_REBALANCE_BUFFER.get(settings)); + setIgnoreThrottleInRestore(IGNORE_THROTTLE_FOR_REMOTE_RESTORE.get(settings)); updateWeightFunction(); setThreshold(THRESHOLD_SETTING.get(settings)); setPreferPrimaryShardBalance(PREFER_PRIMARY_SHARD_BALANCE.get(settings)); @@ -195,6 +205,7 @@ public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSetting clusterSettings.addSettingsUpdateConsumer(PRIMARY_SHARD_REBALANCE_BUFFER, this::updatePreferPrimaryShardBalanceBuffer); clusterSettings.addSettingsUpdateConsumer(PREFER_PRIMARY_SHARD_REBALANCE, this::setPreferPrimaryShardRebalance); clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); + clusterSettings.addSettingsUpdateConsumer(IGNORE_THROTTLE_FOR_REMOTE_RESTORE, this::setIgnoreThrottleInRestore); } /** @@ -205,6 +216,10 @@ private void setMovePrimaryFirst(boolean movePrimaryFirst) { setShardMovementStrategy(this.shardMovementStrategy); } + private void setIgnoreThrottleInRestore(boolean ignoreThrottleInRestore) { + this.ignoreThrottleInRestore = ignoreThrottleInRestore; + } + /** * Sets the correct Shard movement strategy to use. * If users are still using deprecated setting `move_primary_first`, we want behavior to remain unchanged. 
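Because IGNORE_THROTTLE_FOR_REMOTE_RESTORE is declared Dynamic and NodeScope with a default of true, operators can flip it without a restart. A hedged sketch of doing so through the cluster admin client (the client variable is assumed to be in scope; the only value taken from the diff is the setting key):

    import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
    import org.opensearch.common.settings.Settings;

    // Re-enable allocation throttling for remote-store primary restores.
    ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
    request.transientSettings(
        Settings.builder().put("cluster.routing.allocation.remote_primary.ignore_throttle", false)
    );
    client.admin().cluster().updateSettings(request).actionGet();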
@@ -282,7 +297,8 @@ public void allocate(RoutingAllocation allocation) { weightFunction, threshold, preferPrimaryShardBalance, - preferPrimaryShardRebalance + preferPrimaryShardRebalance, + ignoreThrottleInRestore ); localShardsBalancer.allocateUnassigned(); localShardsBalancer.moveShards(); @@ -304,7 +320,8 @@ public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, f weightFunction, threshold, preferPrimaryShardBalance, - preferPrimaryShardRebalance + preferPrimaryShardRebalance, + ignoreThrottleInRestore ); AllocateUnassignedDecision allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN; MoveDecision moveDecision = MoveDecision.NOT_TAKEN; @@ -459,6 +476,7 @@ void updateRebalanceConstraint(String constraint, boolean add) { public static class ModelNode implements Iterable { private final Map indices = new HashMap<>(); private int numShards = 0; + private int numPrimaryShards = 0; private final RoutingNode routingNode; ModelNode(RoutingNode routingNode) { @@ -492,7 +510,7 @@ public int numPrimaryShards(String idx) { } public int numPrimaryShards() { - return indices.values().stream().mapToInt(index -> index.numPrimaryShards()).sum(); + return numPrimaryShards; } public int highestPrimary(String index) { @@ -510,6 +528,10 @@ public void addShard(ShardRouting shard) { indices.put(index.getIndexId(), index); } index.addShard(shard); + if (shard.primary()) { + numPrimaryShards++; + } + numShards++; } @@ -521,6 +543,11 @@ public void removeShard(ShardRouting shard) { indices.remove(shard.getIndexName()); } } + + if (shard.primary()) { + numPrimaryShards--; + } + numShards--; } @@ -558,7 +585,7 @@ public Balancer( float threshold, boolean preferPrimaryBalance ) { - super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance, false); + super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance, false, false); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index 00eb79add9f1d..7e4ae58548c55 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.IntroSorter; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingPool; @@ -60,6 +61,8 @@ public class LocalShardsBalancer extends ShardsBalancer { private final boolean preferPrimaryBalance; private final boolean preferPrimaryRebalance; + + private final boolean ignoreThrottleInRestore; private final BalancedShardsAllocator.WeightFunction weight; private final float threshold; @@ -77,7 +80,8 @@ public LocalShardsBalancer( BalancedShardsAllocator.WeightFunction weight, float threshold, boolean preferPrimaryBalance, - boolean preferPrimaryRebalance + boolean preferPrimaryRebalance, + boolean ignoreThrottleInRestore ) { this.logger = logger; this.allocation = allocation; @@ -94,6 +98,7 @@ public LocalShardsBalancer( this.preferPrimaryBalance = preferPrimaryBalance; this.preferPrimaryRebalance = preferPrimaryRebalance; this.shardMovementStrategy = 
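The ModelNode change in the hunk above replaces an O(#indices) stream sum with counters maintained incrementally in addShard/removeShard; numPrimaryShards() is queried inside hot balancing loops, so the O(1) read is the point of the change. The invariant in isolation (a simplified sketch, not the PR's class):

    // Counters move in lockstep with add/remove, so reads never re-scan indices.
    final class ShardCounters {
        private int numShards;
        private int numPrimaryShards;

        void addShard(boolean primary) {
            if (primary) {
                numPrimaryShards++;
            }
            numShards++;
        }

        void removeShard(boolean primary) {
            if (primary) {
                numPrimaryShards--;
            }
            numShards--;
        }

        int numPrimaryShards() {
            return numPrimaryShards; // was: indices.values().stream().mapToInt(...).sum()
        }
    }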
shardMovementStrategy; + this.ignoreThrottleInRestore = ignoreThrottleInRestore; } /** @@ -918,7 +923,15 @@ AllocateUnassignedDecision decideAllocateUnassigned(final ShardRouting shard) { nodeExplanationMap.put(node.getNodeId(), new NodeAllocationResult(node.getRoutingNode().node(), currentDecision, 0)); nodeWeights.add(Tuple.tuple(node.getNodeId(), currentWeight)); } - if (currentDecision.type() == Decision.Type.YES || currentDecision.type() == Decision.Type.THROTTLE) { + + // For REMOTE_STORE recoveries, THROTTLE is as good as NO, as we want faster recoveries + // The side effect of this is increased relocations after these allocations. + boolean considerThrottleAsNo = ignoreThrottleInRestore + && shard.recoverySource().getType() == RecoverySource.Type.REMOTE_STORE + && shard.primary(); + + if (currentDecision.type() == Decision.Type.YES + || (currentDecision.type() == Decision.Type.THROTTLE && considerThrottleAsNo == false)) { final boolean updateMinNode; if (currentWeight == minWeight) { /* we have an equal weight tie breaking: diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index c008102554e8c..6f211f370de95 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.routing.allocation.decider; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; @@ -125,8 +124,7 @@ private Decision doDecide( RoutingAllocation allocation, BiPredicate decider ) { - IndexMetadata indexMd = allocation.metadata().getIndexSafe(shardRouting.index()); - final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings); + final int indexShardLimit = allocation.metadata().getIndexSafe(shardRouting.index()).getIndexTotalShardsPerNodeLimit(); // Capture the limit here in case it changes during this method's // execution final int clusterShardLimit = this.clusterShardLimit; diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java index 3c578a8c5c01f..0f1ff3138ef90 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java @@ -20,14 +20,15 @@ import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.remote.RemoteWritableEntityStore; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.RemoteClusterStateUtils; import org.opensearch.gateway.remote.RemoteStateTransferException; -import
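The new acceptance condition in decideAllocateUnassigned reads as a small predicate: a node remains a candidate if the deciders said YES, or said THROTTLE and we are not on the remote-store-restore fast path. Restated on its own (method and parameter names are mine; the logic is from the hunk above):

    import org.opensearch.cluster.routing.allocation.decider.Decision;

    static boolean isCandidateNode(Decision.Type type, boolean ignoreThrottleInRestore,
                                   boolean isRemoteStoreRecovery, boolean isPrimary) {
        // For remote store restores of primaries, THROTTLE is demoted to NO.
        boolean considerThrottleAsNo = ignoreThrottleInRestore && isRemoteStoreRecovery && isPrimary;
        return type == Decision.Type.YES
            || (type == Decision.Type.THROTTLE && considerThrottleAsNo == false);
    }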
org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; import org.opensearch.gateway.remote.model.RemoteRoutingTableBlobStore; import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable; import org.opensearch.gateway.remote.routingtable.RemoteRoutingTableDiff; @@ -262,12 +263,13 @@ protected void doStart() { clusterSettings ); - this.remoteRoutingTableDiffStore = new RemoteClusterStateBlobStore<>( + this.remoteRoutingTableDiffStore = new RemoteWriteableEntityBlobStore<>( new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool), blobStoreRepository, clusterName, threadPool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ); } diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index 6234427445754..b2548a8976c73 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -61,6 +61,7 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.telemetry.metrics.tags.Tags; @@ -396,7 +397,7 @@ private void submitStateUpdateTask( final ThreadContext threadContext = threadPool.getThreadContext(); final Supplier supplier = threadContext.newRestorableContext(true); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); final UpdateTask updateTask = new UpdateTask( config.priority(), source, diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index 4ab8255df7658..713de8cdd0fda 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -66,6 +66,7 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.Assertions; import org.opensearch.core.common.text.Text; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; @@ -1022,7 +1023,7 @@ public void submitStateUpdateTasks( final ThreadContext threadContext = threadPool.getThreadContext(); final Supplier supplier = threadContext.newRestorableContext(true); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); List safeTasks = tasks.entrySet() .stream() diff --git a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java index ed324e4e62d8f..ee21c343e2ea1 100644 --- 
a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java +++ b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java @@ -38,7 +38,7 @@ import org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter; import org.apache.logging.log4j.core.pattern.PatternConverter; import org.apache.logging.log4j.core.pattern.ThrowablePatternConverter; -import org.apache.logging.log4j.util.Strings; +import org.opensearch.core.common.Strings; import java.nio.charset.Charset; import java.util.StringJoiner; @@ -84,7 +84,7 @@ public static JsonThrowablePatternConverter newInstance(final Configuration conf @Override public void format(final LogEvent event, final StringBuilder toAppendTo) { String consoleStacktrace = formatStacktrace(event); - if (Strings.isNotEmpty(consoleStacktrace)) { + if (!Strings.isNullOrEmpty(consoleStacktrace)) { String jsonStacktrace = formatJson(consoleStacktrace); toAppendTo.append(", "); diff --git a/server/src/main/java/org/opensearch/common/remote/AbstractClusterMetadataWriteableBlobEntity.java b/server/src/main/java/org/opensearch/common/remote/AbstractClusterMetadataWriteableBlobEntity.java new file mode 100644 index 0000000000000..b9492da04680d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/remote/AbstractClusterMetadataWriteableBlobEntity.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.remote; + +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; + +/** + * An extension of {@link RemoteWriteableEntity} class which caters to the use case of writing to and reading from a blob storage + * + * @param The class type which can be uploaded to or downloaded from a blob storage. 
+ */ +public abstract class AbstractClusterMetadataWriteableBlobEntity extends RemoteWriteableBlobEntity { + + protected final NamedXContentRegistry namedXContentRegistry; + + public AbstractClusterMetadataWriteableBlobEntity( + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + super(clusterUUID, compressor); + this.namedXContentRegistry = namedXContentRegistry; + } + + public AbstractClusterMetadataWriteableBlobEntity(final String clusterUUID, final Compressor compressor) { + super(clusterUUID, compressor); + this.namedXContentRegistry = null; + } + + public abstract UploadedMetadata getUploadedMetadata(); + + public NamedXContentRegistry getNamedXContentRegistry() { + return namedXContentRegistry; + } +} diff --git a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableEntityManager.java b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableEntityManager.java index dc301635c4a80..8e2de1580a49f 100644 --- a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableEntityManager.java +++ b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableEntityManager.java @@ -31,7 +31,7 @@ public abstract class AbstractRemoteWritableEntityManager implements RemoteWrita * @return the remote writable entity store for the given entity * @throws IllegalArgumentException if the entity type is unknown */ - protected RemoteWritableEntityStore getStore(AbstractRemoteWritableBlobEntity entity) { + protected RemoteWritableEntityStore getStore(AbstractClusterMetadataWriteableBlobEntity entity) { RemoteWritableEntityStore remoteStore = remoteWritableEntityStores.get(entity.getType()); if (remoteStore == null) { throw new IllegalArgumentException("Unknown entity type [" + entity.getType() + "]"); @@ -49,7 +49,7 @@ protected RemoteWritableEntityStore getStore(AbstractRemoteWritableBlobEntity en */ protected abstract ActionListener getWrappedWriteListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ); @@ -64,21 +64,21 @@ protected abstract ActionListener getWrappedWriteListener( */ protected abstract ActionListener getWrappedReadListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ); @Override public void writeAsync( String component, - AbstractRemoteWritableBlobEntity entity, + AbstractClusterMetadataWriteableBlobEntity entity, ActionListener listener ) { getStore(entity).writeAsync(entity, getWrappedWriteListener(component, entity, listener)); } @Override - public void readAsync(String component, AbstractRemoteWritableBlobEntity entity, ActionListener listener) { + public void readAsync(String component, AbstractClusterMetadataWriteableBlobEntity entity, ActionListener listener) { getStore(entity).readAsync(entity, getWrappedReadListener(component, entity, listener)); } } diff --git a/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityManager.java b/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityManager.java index 7693d1b5284bd..c27598e368e4d 100644 --- a/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityManager.java +++ b/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityManager.java @@ -29,7 +29,7 @@ public interface RemoteWritableEntityManager { * {@link 
ActionListener#onFailure(Exception)} method is called with * an exception if the read operation fails. */ - void readAsync(String component, AbstractRemoteWritableBlobEntity entity, ActionListener listener); + void readAsync(String component, AbstractClusterMetadataWriteableBlobEntity entity, ActionListener listener); /** * Performs an asynchronous write operation for the specified component and entity. @@ -43,5 +43,5 @@ public interface RemoteWritableEntityManager { * {@link ActionListener#onFailure(Exception)} method is called with * an exception if the write operation fails. */ - void writeAsync(String component, AbstractRemoteWritableBlobEntity entity, ActionListener listener); + void writeAsync(String component, AbstractClusterMetadataWriteableBlobEntity entity, ActionListener listener); } diff --git a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableBlobEntity.java similarity index 62% rename from server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java rename to server/src/main/java/org/opensearch/common/remote/RemoteWriteableBlobEntity.java index 237c077cb673c..f034ce2d1adf1 100644 --- a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java +++ b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableBlobEntity.java @@ -10,38 +10,25 @@ import org.opensearch.common.blobstore.BlobPath; import org.opensearch.core.compress.Compressor; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.PATH_DELIMITER; /** - * An extension of {@link RemoteWriteableEntity} class which caters to the use case of writing to and reading from a blob storage - * - * @param The class type which can be uploaded to or downloaded from a blob storage. 
+ * The abstract class which represents a {@link RemoteWriteableEntity} that can be written to a store + * @param the entity to be written */ -public abstract class AbstractRemoteWritableBlobEntity implements RemoteWriteableEntity { +public abstract class RemoteWriteableBlobEntity implements RemoteWriteableEntity { protected String blobFileName; protected String blobName; private final String clusterUUID; private final Compressor compressor; - private final NamedXContentRegistry namedXContentRegistry; private String[] pathTokens; - public AbstractRemoteWritableBlobEntity( - final String clusterUUID, - final Compressor compressor, - final NamedXContentRegistry namedXContentRegistry - ) { + public RemoteWriteableBlobEntity(final String clusterUUID, final Compressor compressor) { this.clusterUUID = clusterUUID; this.compressor = compressor; - this.namedXContentRegistry = namedXContentRegistry; - } - - public AbstractRemoteWritableBlobEntity(final String clusterUUID, final Compressor compressor) { - this(clusterUUID, compressor, null); } public abstract BlobPathParameters getBlobPathParameters(); @@ -80,16 +67,10 @@ public String clusterUUID() { return clusterUUID; } - public abstract UploadedMetadata getUploadedMetadata(); - public void setFullBlobName(BlobPath blobPath) { this.blobName = blobPath.buildAsString() + blobFileName; } - public NamedXContentRegistry getNamedXContentRegistry() { - return namedXContentRegistry; - } - protected Compressor getCompressor() { return compressor; } diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntityBlobStore.java similarity index 78% rename from server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java rename to server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntityBlobStore.java index cd8b8aa41ad65..baa44a7c9bde9 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java +++ b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntityBlobStore.java @@ -6,49 +6,48 @@ * compatible open source license. */ -package org.opensearch.gateway.remote.model; +package org.opensearch.common.remote; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.stream.write.WritePriority; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; -import org.opensearch.common.remote.RemoteWritableEntityStore; -import org.opensearch.common.remote.RemoteWriteableEntity; import org.opensearch.core.action.ActionListener; -import org.opensearch.gateway.remote.RemoteClusterStateUtils; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.util.concurrent.ExecutorService; -import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN; - /** * Abstract class for a blob type storage * * @param The entity which can be uploaded to / downloaded from blob store * @param The concrete class implementing {@link RemoteWriteableEntity} which is used as a wrapper for T entity. 
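After the rename and the new subclass earlier in this diff, the blob-entity hierarchy separates generic blob handling from cluster-metadata concerns. Its shape, sketched from the pieces shown here (the serialization contract on the root interface is my reading of RemoteWriteableEntity, not spelled out in this diff):

    // RemoteWriteableEntity<T>                       serialization contract
    //   └── RemoteWriteableBlobEntity<T>             blob name/path tokens + compressor;
    //                                                no XContent dependency
    //         └── AbstractClusterMetadataWriteableBlobEntity<T>
    //                                                re-adds NamedXContentRegistry and the
    //                                                getUploadedMetadata() hook for the
    //                                                cluster-state upload flow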
*/ -public class RemoteClusterStateBlobStore> implements RemoteWritableEntityStore { +public class RemoteWriteableEntityBlobStore> implements RemoteWritableEntityStore { private final BlobStoreTransferService transferService; private final BlobStoreRepository blobStoreRepository; private final String clusterName; private final ExecutorService executorService; + private final String pathToken; - public RemoteClusterStateBlobStore( + public RemoteWriteableEntityBlobStore( final BlobStoreTransferService blobStoreTransferService, final BlobStoreRepository blobStoreRepository, final String clusterName, final ThreadPool threadPool, - final String executor + final String executor, + final String pathToken ) { this.transferService = blobStoreTransferService; this.blobStoreRepository = blobStoreRepository; this.clusterName = clusterName; this.executorService = threadPool.executor(executor); + this.pathToken = pathToken; } @Override @@ -95,13 +94,10 @@ public String getClusterName() { } public BlobPath getBlobPathPrefix(String clusterUUID) { - return blobStoreRepository.basePath() - .add(RemoteClusterStateUtils.encodeString(getClusterName())) - .add(CLUSTER_STATE_PATH_TOKEN) - .add(clusterUUID); + return blobStoreRepository.basePath().add(encodeString(getClusterName())).add(pathToken).add(clusterUUID); } - public BlobPath getBlobPathForUpload(final AbstractRemoteWritableBlobEntity obj) { + public BlobPath getBlobPathForUpload(final RemoteWriteableBlobEntity obj) { BlobPath blobPath = getBlobPathPrefix(obj.clusterUUID()); for (String token : obj.getBlobPathParameters().getPathTokens()) { blobPath = blobPath.add(token); @@ -109,7 +105,7 @@ public BlobPath getBlobPathForUpload(final AbstractRemoteWritableBlobEntity o return blobPath; } - public BlobPath getBlobPathForDownload(final AbstractRemoteWritableBlobEntity obj) { + public BlobPath getBlobPathForDownload(final RemoteWriteableBlobEntity obj) { String[] pathTokens = obj.getBlobPathTokens(); BlobPath blobPath = new BlobPath(); if (pathTokens == null || pathTokens.length < 1) { @@ -122,4 +118,8 @@ public BlobPath getBlobPathForDownload(final AbstractRemoteWritableBlobEntity return blobPath; } + private static String encodeString(String content) { + return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8)); + } + } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 2f60c731bc554..8daf9125bb27e 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -173,6 +173,7 @@ import org.opensearch.transport.SniffConnectionStrategy; import org.opensearch.transport.TransportSettings; import org.opensearch.watcher.ResourceWatcherService; +import org.opensearch.wlm.WorkloadManagementSettings; import java.util.Arrays; import java.util.Collections; @@ -268,6 +269,7 @@ public void apply(Settings value, Settings current, Settings previous) { BalancedShardsAllocator.SHARD_MOVE_PRIMARY_FIRST_SETTING, BalancedShardsAllocator.SHARD_MOVEMENT_STRATEGY_SETTING, BalancedShardsAllocator.THRESHOLD_SETTING, + BalancedShardsAllocator.IGNORE_THROTTLE_FOR_REMOTE_RESTORE, BreakerSettings.CIRCUIT_BREAKER_LIMIT_SETTING, BreakerSettings.CIRCUIT_BREAKER_OVERHEAD_SETTING, BreakerSettings.CIRCUIT_BREAKER_TYPE, @@ -757,12 +759,22 @@ public void apply(Settings value, Settings current, Settings previous) { 
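The private encodeString that now lives in RemoteWriteableEntityBlobStore mirrors the RemoteClusterStateUtils.encodeString call it replaces: URL-safe Base64 without padding. A deterministic sketch of the prefix getBlobPathPrefix builds (cluster name, path token and UUID are made up):

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    String encoded = Base64.getUrlEncoder()
        .withoutPadding()
        .encodeToString("my-cluster".getBytes(StandardCharsets.UTF_8));
    // encoded == "bXktY2x1c3Rlcg"
    // getBlobPathPrefix("uuid-1") -> <repo base path>/bXktY2x1c3Rlcg/<pathToken>/uuid-1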
RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS, RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_SCHEDULER_INTERVAL, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_LOOKBACK_INTERVAL, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED, + SearchService.CLUSTER_ALLOW_DERIVED_FIELD_SETTING, // Composite index settings CompositeIndexSettings.STAR_TREE_INDEX_ENABLED_SETTING, - SystemTemplatesService.SETTING_APPLICATION_BASED_CONFIGURATION_TEMPLATES_ENABLED + SystemTemplatesService.SETTING_APPLICATION_BASED_CONFIGURATION_TEMPLATES_ENABLED, + + // WorkloadManagement settings + WorkloadManagementSettings.NODE_LEVEL_CPU_REJECTION_THRESHOLD, + WorkloadManagementSettings.NODE_LEVEL_CPU_CANCELLATION_THRESHOLD, + WorkloadManagementSettings.NODE_LEVEL_MEMORY_REJECTION_THRESHOLD, + WorkloadManagementSettings.NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD ) ) ); diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 6e7d77d0c00d4..284eb43aa5509 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -191,7 +191,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings { BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING, IndexModule.INDEX_STORE_TYPE_SETTING, IndexModule.INDEX_STORE_PRE_LOAD_SETTING, - IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS, IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS, IndexModule.INDEX_RECOVERY_TYPE_SETTING, IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING, @@ -239,6 +238,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { // Settings for concurrent segment search IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING, + IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT, IndexSettings.ALLOW_DERIVED_FIELDS, // Settings for star tree index diff --git a/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java b/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java index d3d3304cb909a..cfe2bbb85bda4 100644 --- a/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java +++ b/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java @@ -61,6 +61,13 @@ public void run() { "Time taken to execute timed runnables in this cycle:[{}ms]", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime) ); + onComplete(); } + /** + * Callback method that is invoked after all {@link TimeoutAwareRunnable} instances in the batch have been processed. + * By default, this method does nothing, but it can be overridden by subclasses or modified in the implementation if + * there is a need to perform additional actions once the batch execution is completed. 
+ */ + public void onComplete() {} } diff --git a/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java b/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java new file mode 100644 index 0000000000000..2d7948d414937 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.RandomAccessInput; + +import java.io.IOException; + +/** + * A bitset backed by a byte array. This will initialize and set bits in the byte array based on the index. + */ +public class ByteArrayBackedBitset { + private final byte[] byteArray; + + /** + * Constructor which uses an on-heap byte array. This should be used during construction of the bitset. + */ + public ByteArrayBackedBitset(int capacity) { + byteArray = new byte[capacity]; + } + + /** + * Constructor which uses Lucene's RandomAccessInput to read the bitset into a read-only buffer. + */ + public ByteArrayBackedBitset(RandomAccessInput in, long offset, int length) throws IOException { + byteArray = new byte[length]; + int i = 0; + while (i < length) { + byteArray[i] = in.readByte(offset + i); + i++; + } + } + + /** + * Constructor which uses Lucene's IndexInput to read the bitset into a read-only buffer. + */ + public ByteArrayBackedBitset(IndexInput in, int length) throws IOException { + byteArray = new byte[length]; + int i = 0; + while (i < length) { + byteArray[i] = in.readByte(); + i++; + } + } + + /** + * Sets the bit at the given index to 1. + * Each byte holds 8 bits, so the index is divided by 8 to get the byte array index. + * @param index the index to set the bit + */ + public void set(int index) { + int byteArrIndex = index >> 3; + byteArray[byteArrIndex] |= (byte) (1 << (index & 7)); + } + + public int write(IndexOutput output) throws IOException { + int numBytes = 0; + for (Byte bitSet : byteArray) { + output.writeByte(bitSet); + numBytes += Byte.BYTES; + } + return numBytes; + } + + /** + * Retrieves whether the bit is set or not at the given index. + * @param index the index to look up for the bit + * @return true if bit is set, false otherwise + */ + public boolean get(int index) throws IOException { + int byteArrIndex = index >> 3; + return (byteArray[byteArrIndex] & (1 << (index & 7))) != 0; + } + + public int getCurrBytesRead() { + return byteArray.length; + } +} diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 9d57e6939e3ae..e2554d61116ad 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -72,6 +72,11 @@ public class FeatureFlags { */ public static final String REMOTE_PUBLICATION_EXPERIMENTAL = "opensearch.experimental.feature.remote_store.publication.enabled"; + /** + * Gates the functionality of background task execution.
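A short usage sketch of the new bitset as defined above; a two-byte array holds 16 bits, and the shift arithmetic in the comments matches set()/get():

    import java.io.IOException;

    public final class BitsetDemo {
        public static void main(String[] args) throws IOException {
            ByteArrayBackedBitset bitset = new ByteArrayBackedBitset(2); // 16 bits, on heap
            bitset.set(3);  // byteArray[0] |= 1 << 3
            bitset.set(9);  // index >> 3 == 1, index & 7 == 1 -> byteArray[1] |= 1 << 1
            System.out.println(bitset.get(3));  // true
            System.out.println(bitset.get(9));  // true
            System.out.println(bitset.get(4));  // false
        }
    }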
+ */ + public static final String BACKGROUND_TASK_EXECUTION_EXPERIMENTAL = "opensearch.experimental.feature.task.background.enabled"; + public static final Setting REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( REMOTE_STORE_MIGRATION_EXPERIMENTAL, false, diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 906a27e9f398c..070e18481f2a3 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -45,11 +45,13 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.http.HttpTransportSettings; +import org.opensearch.secure_sm.ThreadContextPermission; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskThreadContextStatePropagator; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.security.Permission; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -111,6 +113,12 @@ public final class ThreadContext implements Writeable { */ public static final String ACTION_ORIGIN_TRANSIENT_NAME = "action.origin"; + // thread context permissions + + private static final Permission ACCESS_SYSTEM_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("markAsSystemContext"); + private static final Permission STASH_AND_MERGE_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("stashAndMergeHeaders"); + private static final Permission STASH_WITH_ORIGIN_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("stashWithOrigin"); + private static final Logger logger = LogManager.getLogger(ThreadContext.class); private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(); private final Map defaultHeader; @@ -205,8 +213,19 @@ public Writeable captureAsWriteable() { * For example, a user might not have permission to GET from the tasks index * but the tasks API will perform a get on their behalf using this method * if it can't find the task in memory. + * + * Usage of stashWithOrigin is guarded by a ThreadContextPermission. In order to use + * stashWithOrigin, the codebase needs to explicitly be granted permission in the JSM policy file. + * + * Add an entry in the grant portion of the policy file like this: + * + * permission org.opensearch.secure_sm.ThreadContextPermission "stashWithOrigin"; */ public StoredContext stashWithOrigin(String origin) { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(STASH_WITH_ORIGIN_THREAD_CONTEXT_PERMISSION); + } final ThreadContext.StoredContext storedContext = stashContext(); putTransient(ACTION_ORIGIN_TRANSIENT_NAME, origin); return storedContext; @@ -216,8 +235,19 @@ public StoredContext stashWithOrigin(String origin) { * Removes the current context and resets a new context that contains a merge of the current headers and the given headers. * The removed context can be restored when closing the returned {@link StoredContext}. The merge strategy is that headers * that are already existing are preserved unless they are defaults. + * + * Usage of stashAndMergeHeaders is guarded by a ThreadContextPermission. In order to use + * stashAndMergeHeaders, the codebase needs to explicitly be granted permission in the JSM policy file. 
+ * + * Add an entry in the grant portion of the policy file like this: + * + * permission org.opensearch.secure_sm.ThreadContextPermission "stashAndMergeHeaders"; */ public StoredContext stashAndMergeHeaders(Map headers) { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(STASH_AND_MERGE_THREAD_CONTEXT_PERMISSION); + } final ThreadContextStruct context = threadLocal.get(); Map newHeader = new HashMap<>(headers); newHeader.putAll(context.requestHeaders); @@ -554,8 +584,19 @@ boolean isDefaultContext() { /** * Marks this thread context as an internal system context. This signals that actions in this context are issued * by the system itself rather than by a user action. + * + * Usage of markAsSystemContext is guarded by a ThreadContextPermission. In order to use + * markAsSystemContext, the codebase needs to explicitly be granted permission in the JSM policy file. + * + * Add an entry in the grant portion of the policy file like this: + * + * permission org.opensearch.secure_sm.ThreadContextPermission "markAsSystemContext"; */ public void markAsSystemContext() { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(ACCESS_SYSTEM_THREAD_CONTEXT_PERMISSION); + } threadLocal.set(threadLocal.get().setSystemContext(propagators)); } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextAccess.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextAccess.java new file mode 100644 index 0000000000000..14f8b8d79bf4d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextAccess.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util.concurrent; + +import org.opensearch.SpecialPermission; +import org.opensearch.common.annotation.InternalApi; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +/** + * This class wraps the {@link ThreadContext} operations requiring access in + * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. 
+ * + * @opensearch.internal + */ +@SuppressWarnings("removal") +@InternalApi +public final class ThreadContextAccess { + + private ThreadContextAccess() {} + + public static T doPrivileged(PrivilegedAction operation) { + SpecialPermission.check(); + return AccessController.doPrivileged(operation); + } + + public static void doPrivilegedVoid(Runnable action) { + SpecialPermission.check(); + AccessController.doPrivileged((PrivilegedAction) () -> { + action.run(); + return null; + }); + } +} diff --git a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java index 998122d9e5c43..d24571fc5778d 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java +++ b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java @@ -9,6 +9,7 @@ package org.opensearch.common.xcontent; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.AbstractXContentParser; import org.opensearch.core.xcontent.DeprecationHandler; @@ -23,6 +24,9 @@ import java.math.BigInteger; import java.nio.CharBuffer; import java.util.ArrayList; +import java.util.Collections; +import java.util.Deque; +import java.util.LinkedList; /** * JsonToStringParser is the main parser class to transform JSON into stringFields in a XContentParser @@ -32,21 +36,20 @@ */ public class JsonToStringXContentParser extends AbstractXContentParser { private final String fieldTypeName; - private XContentParser parser; + private final XContentParser parser; - private ArrayList valueList = new ArrayList<>(); - private ArrayList valueAndPathList = new ArrayList<>(); - private ArrayList keyList = new ArrayList<>(); + private final ArrayList valueList = new ArrayList<>(); + private final ArrayList valueAndPathList = new ArrayList<>(); + private final ArrayList keyList = new ArrayList<>(); - private XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent); + private final XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent); - private NamedXContentRegistry xContentRegistry; + private final NamedXContentRegistry xContentRegistry; - private DeprecationHandler deprecationHandler; + private final DeprecationHandler deprecationHandler; private static final String VALUE_AND_PATH_SUFFIX = "._valueAndPath"; private static final String VALUE_SUFFIX = "._value"; - private static final String DOT_SYMBOL = "."; private static final String EQUAL_SYMBOL = "="; public JsonToStringXContentParser( @@ -63,9 +66,14 @@ public JsonToStringXContentParser( } public XContentParser parseObject() throws IOException { + assert currentToken() == Token.START_OBJECT; + parser.nextToken(); // Skip the outer START_OBJECT. Need to return on END_OBJECT. 
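The ClusterApplierService and MasterService hunks earlier in this diff show the intended call pattern for the new guard: stash the context, then mark it as system from inside a privileged block so the permission check added to ThreadContext.markAsSystemContext passes. A sketch of that pattern (threadPool is assumed to be in scope):

    // JSM policy grant required by the javadoc above:
    //   permission org.opensearch.secure_sm.ThreadContextPermission "markAsSystemContext";

    final ThreadContext threadContext = threadPool.getThreadContext();
    try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
        // doPrivilegedVoid checks SpecialPermission, then runs the action
        // inside AccessController.doPrivileged(...).
        ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext);
        // ... submit system-originated work here ...
    }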
+ builder.startObject(); - StringBuilder path = new StringBuilder(fieldTypeName); - parseToken(path, null); + LinkedList path = new LinkedList<>(Collections.singleton(fieldTypeName)); + while (currentToken() != Token.END_OBJECT) { + parseToken(path); + } builder.field(this.fieldTypeName, keyList); builder.field(this.fieldTypeName + VALUE_SUFFIX, valueList); builder.field(this.fieldTypeName + VALUE_AND_PATH_SUFFIX, valueAndPathList); @@ -74,75 +82,55 @@ public XContentParser parseObject() throws IOException { return JsonXContent.jsonXContent.createParser(this.xContentRegistry, this.deprecationHandler, String.valueOf(jString)); } - private void parseToken(StringBuilder path, String currentFieldName) throws IOException { - - while (this.parser.nextToken() != Token.END_OBJECT) { - if (this.parser.currentName() != null) { - currentFieldName = this.parser.currentName(); + private void parseToken(Deque path) throws IOException { + if (this.parser.currentToken() == Token.FIELD_NAME) { + String fieldName = this.parser.currentName(); + path.addLast(fieldName); // Pushing onto the stack *must* be matched by pop + String parts = fieldName; + while (parts.contains(".")) { // Extract the intermediate keys maybe present in fieldName + int dotPos = parts.indexOf('.'); + String part = parts.substring(0, dotPos); + this.keyList.add(part); + parts = parts.substring(dotPos + 1); } - StringBuilder parsedFields = new StringBuilder(); - - if (this.parser.currentToken() == Token.FIELD_NAME) { - path.append(DOT_SYMBOL).append(currentFieldName); - int dotIndex = currentFieldName.indexOf(DOT_SYMBOL); - String fieldNameSuffix = currentFieldName; - // The field name may be of the form foo.bar.baz - // If that's the case, each "part" is a key. - while (dotIndex >= 0) { - String fieldNamePrefix = fieldNameSuffix.substring(0, dotIndex); - if (!fieldNamePrefix.isEmpty()) { - this.keyList.add(fieldNamePrefix); - } - fieldNameSuffix = fieldNameSuffix.substring(dotIndex + 1); - dotIndex = fieldNameSuffix.indexOf(DOT_SYMBOL); - } - if (!fieldNameSuffix.isEmpty()) { - this.keyList.add(fieldNameSuffix); - } - } else if (this.parser.currentToken() == Token.START_ARRAY) { - parseToken(path, currentFieldName); - break; - } else if (this.parser.currentToken() == Token.END_ARRAY) { - // skip - } else if (this.parser.currentToken() == Token.START_OBJECT) { - parseToken(path, currentFieldName); - int dotIndex = path.lastIndexOf(DOT_SYMBOL, path.length()); - - if (dotIndex != -1 && path.length() > currentFieldName.length()) { - path.setLength(path.length() - currentFieldName.length() - 1); - } - } else { - if (!path.toString().contains(currentFieldName)) { - path.append(DOT_SYMBOL).append(currentFieldName); - } - parseValue(parsedFields); - this.valueList.add(parsedFields.toString()); - this.valueAndPathList.add(path + EQUAL_SYMBOL + parsedFields); - int dotIndex = path.lastIndexOf(DOT_SYMBOL, path.length()); - if (dotIndex != -1 && path.length() > currentFieldName.length()) { - path.setLength(path.length() - currentFieldName.length() - 1); - } + this.keyList.add(parts); // parts has no dot, so either it's the original fieldName or it's the last part + this.parser.nextToken(); // advance to the value of fieldName + parseToken(path); // parse the value for fieldName (which will be an array, an object, or a primitive value) + path.removeLast(); // Here is where we pop fieldName from the stack (since we're done with the value of fieldName) + // Note that whichever other branch we just passed through has already ended with nextToken(), so 
we + // don't need to call it. + } else if (this.parser.currentToken() == Token.START_ARRAY) { + parser.nextToken(); + while (this.parser.currentToken() != Token.END_ARRAY) { + parseToken(path); } - + this.parser.nextToken(); + } else if (this.parser.currentToken() == Token.START_OBJECT) { + parser.nextToken(); + while (this.parser.currentToken() != Token.END_OBJECT) { + parseToken(path); + } + this.parser.nextToken(); + } else if (this.parser.currentToken().isValue()) { + String parsedValue = parseValue(); + if (parsedValue != null) { + this.valueList.add(parsedValue); + this.valueAndPathList.add(Strings.collectionToDelimitedString(path, ".") + EQUAL_SYMBOL + parsedValue); + } + this.parser.nextToken(); } } - private void parseValue(StringBuilder parsedFields) throws IOException { + private String parseValue() throws IOException { switch (this.parser.currentToken()) { case VALUE_BOOLEAN: case VALUE_NUMBER: case VALUE_STRING: case VALUE_NULL: - parsedFields.append(this.parser.textOrNull()); - break; + return this.parser.textOrNull(); // Handle other token types as needed - case FIELD_NAME: - case VALUE_EMBEDDED_OBJECT: - case END_ARRAY: - case START_ARRAY: - break; default: - throw new IOException("Unsupported token type [" + parser.currentToken() + "]"); + throw new IOException("Unsupported value token type [" + parser.currentToken() + "]"); } } diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index 2748938d8b761..709c0eba4f57f 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Strings; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.Directory; @@ -61,6 +60,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; @@ -1300,7 +1300,7 @@ public static List collectFileCacheDataPath(NodePath fileCacheNodePath) th * Resolve the custom path for a index's shard. 
*/ public static Path resolveBaseCustomLocation(String customDataPath, Path sharedDataPath, int nodeLockId) { - if (Strings.isNotEmpty(customDataPath)) { + if (!Strings.isNullOrEmpty(customDataPath)) { // This assert is because this should be caught by MetadataCreateIndexService assert sharedDataPath != null; return sharedDataPath.resolve(customDataPath).resolve(Integer.toString(nodeLockId)); diff --git a/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java b/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java index 4f39a39cea678..df642a9f5a743 100644 --- a/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java +++ b/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java @@ -80,6 +80,14 @@ public synchronized void clearShard(ShardId shardId) { this.cache.deleteShard(shardId); } + public boolean hasEmptyCache() { + return this.cache.getCache().isEmpty(); + } + + public AsyncShardFetchCache getCache() { + return this.cache; + } + /** * Cache implementation of transport actions returning batch of shards related data in the response. * Store node level responses of transport actions like {@link TransportNodesListGatewayStartedShardsBatch} or diff --git a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java index 0d6af943d39e0..2b6c5e3f5ae53 100644 --- a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java @@ -47,7 +47,6 @@ import org.opensearch.core.index.shard.ShardId; import java.util.ArrayList; -import java.util.HashSet; import java.util.List; import java.util.Set; @@ -82,23 +81,29 @@ public void allocateUnassigned( executeDecision(shardRouting, allocateUnassignedDecision, allocation, unassignedAllocationHandler); } - protected void allocateUnassignedBatchOnTimeout(List shardRoutings, RoutingAllocation allocation, boolean primary) { - Set shardIdsFromBatch = new HashSet<>(); - for (ShardRouting shardRouting : shardRoutings) { - ShardId shardId = shardRouting.shardId(); - shardIdsFromBatch.add(shardId); + protected void allocateUnassignedBatchOnTimeout(Set shardIds, RoutingAllocation allocation, boolean primary) { + if (shardIds.isEmpty()) { + return; } RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); while (iterator.hasNext()) { ShardRouting unassignedShard = iterator.next(); AllocateUnassignedDecision allocationDecision; - if (unassignedShard.primary() == primary && shardIdsFromBatch.contains(unassignedShard.shardId())) { + if (unassignedShard.primary() == primary && shardIds.contains(unassignedShard.shardId())) { + if (isResponsibleFor(unassignedShard) == false) { + continue; + } allocationDecision = AllocateUnassignedDecision.throttle(null); executeDecision(unassignedShard, allocationDecision, allocation, iterator); } } } + /** + * Is the allocator responsible for allocating the given {@link ShardRouting}? 
+ */ + protected abstract boolean isResponsibleFor(ShardRouting shardRouting); + protected void executeDecision( ShardRouting shardRouting, AllocateUnassignedDecision allocateUnassignedDecision, diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java index f41545cbdf9bf..dea7ca9a08edd 100644 --- a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java @@ -82,7 +82,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { /** * Is the allocator responsible for allocating the given {@link ShardRouting}? */ - protected static boolean isResponsibleFor(final ShardRouting shard) { + protected boolean isResponsibleFor(final ShardRouting shard) { return shard.primary() // must be primary && shard.unassigned() // must be unassigned // only handle either an existing store or a snapshot recovery diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java index aaf0d696e1444..c30ee8479ac97 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java @@ -191,7 +191,7 @@ public void processExistingRecoveries(RoutingAllocation allocation) { /** * Is the allocator responsible for allocating the given {@link ShardRouting}? */ - protected static boolean isResponsibleFor(final ShardRouting shard) { + protected boolean isResponsibleFor(final ShardRouting shard) { return shard.primary() == false // must be a replica && shard.unassigned() // must be unassigned // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java index 7c75f2a5d1a8f..020a543ac5fc5 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java @@ -173,7 +173,7 @@ private AllocateUnassignedDecision getUnassignedShardAllocationDecision( RoutingAllocation allocation, Supplier> nodeStoreFileMetaDataMapSupplier ) { - if (!isResponsibleFor(shardRouting)) { + if (isResponsibleFor(shardRouting) == false) { return AllocateUnassignedDecision.NOT_TAKEN; } Tuple> result = canBeAllocatedToAtLeastOneNode(shardRouting, allocation); @@ -183,7 +183,7 @@ private AllocateUnassignedDecision getUnassignedShardAllocationDecision( if (allocationDecision.type() != Decision.Type.YES && (!explain || !hasInitiatedFetching(shardRouting))) { // only return early if we are not in explain mode, or we are in explain mode but we have not // yet attempted to fetch any shard data - logger.trace("{}: ignoring allocation, can't be allocated on any node", shardRouting); + logger.trace("{}: ignoring allocation, can't be allocated on any node. Decision: {}", shardRouting, allocationDecision.type()); return AllocateUnassignedDecision.no( UnassignedInfo.AllocationStatus.fromDecision(allocationDecision.type()), result.v2() != null ? 
new ArrayList<>(result.v2().values()) : null diff --git a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java index 55f5388d8f454..d18304ea73ed0 100644 --- a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java @@ -73,13 +73,14 @@ public class ShardsBatchGatewayAllocator implements ExistingShardsAllocator { private final long maxBatchSize; private static final short DEFAULT_SHARD_BATCH_SIZE = 2000; - private static final String PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY = + public static final String PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY = "cluster.routing.allocation.shards_batch_gateway_allocator.primary_allocator_timeout"; - private static final String REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY = + public static final String REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY = "cluster.routing.allocation.shards_batch_gateway_allocator.replica_allocator_timeout"; private TimeValue primaryShardsBatchGatewayAllocatorTimeout; private TimeValue replicaShardsBatchGatewayAllocatorTimeout; + public static final TimeValue MIN_ALLOCATOR_TIMEOUT = TimeValue.timeValueSeconds(20); /** * Number of shards we send in one batch to data nodes for fetching metadata @@ -92,16 +93,50 @@ public class ShardsBatchGatewayAllocator implements ExistingShardsAllocator { Setting.Property.NodeScope ); + /** + * Timeout for existing primary shards batch allocator. + * Timeout value must be greater than or equal to 20s or -1ms to effectively disable timeout + */ public static final Setting PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING = Setting.timeSetting( PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, TimeValue.MINUS_ONE, + TimeValue.MINUS_ONE, + new Setting.Validator<>() { + @Override + public void validate(TimeValue timeValue) { + if (timeValue.compareTo(MIN_ALLOCATOR_TIMEOUT) < 0 && timeValue.compareTo(TimeValue.MINUS_ONE) != 0) { + throw new IllegalArgumentException( + "Setting [" + + PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey() + + "] should be more than 20s or -1ms to disable timeout" + ); + } + } + }, Setting.Property.NodeScope, Setting.Property.Dynamic ); + /** + * Timeout for existing replica shards batch allocator. 
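The anonymous validator above accepts exactly two ranges: the -1 sentinel (timeout disabled) and anything at or above MIN_ALLOCATOR_TIMEOUT (20s); everything in between is rejected. The same rule as a standalone helper (name and message are mine):

    import org.opensearch.common.unit.TimeValue;

    static void validateAllocatorTimeout(TimeValue value) {
        if (value.compareTo(TimeValue.timeValueSeconds(20)) < 0
            && value.compareTo(TimeValue.MINUS_ONE) != 0) {
            throw new IllegalArgumentException("timeout must be at least 20s, or -1 to disable");
        }
    }
    // validateAllocatorTimeout(TimeValue.timeValueSeconds(30)); // ok
    // validateAllocatorTimeout(TimeValue.timeValueSeconds(10)); // throws
    // validateAllocatorTimeout(TimeValue.MINUS_ONE);            // ok, timeout disabled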
+ /** + * Timeout for existing replica shards batch allocator. + * Timeout value must be greater than or equal to 20s or -1ms to effectively disable timeout + */ public static final Setting<TimeValue> REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING = Setting.timeSetting( REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, TimeValue.MINUS_ONE, + TimeValue.MINUS_ONE, + new Setting.Validator<>() { + @Override + public void validate(TimeValue timeValue) { + if (timeValue.compareTo(MIN_ALLOCATOR_TIMEOUT) < 0 && timeValue.compareTo(TimeValue.MINUS_ONE) != 0) { + throw new IllegalArgumentException( + "Setting [" + + REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey() + + "] should be at least 20s or -1ms to disable timeout" + ); + } + } + }, Setting.Property.NodeScope, Setting.Property.Dynamic ); @@ -242,17 +277,14 @@ protected BatchRunnableExecutor innerAllocateUnassignedBatch( } List<TimeoutAwareRunnable> runnables = new ArrayList<>(); if (primary) { + Set<ShardId> timedOutPrimaryShardIds = new HashSet<>(); batchIdToStartedShardBatch.values() .stream() .filter(batch -> batchesToAssign.contains(batch.batchId)) .forEach(shardsBatch -> runnables.add(new TimeoutAwareRunnable() { @Override public void onTimeout() { - primaryBatchShardAllocator.allocateUnassignedBatchOnTimeout( - shardsBatch.getBatchedShardRoutings(), - allocation, - true - ); + timedOutPrimaryShardIds.addAll(shardsBatch.getBatchedShards()); } @Override @@ -260,15 +292,22 @@ public void run() { primaryBatchShardAllocator.allocateUnassignedBatch(shardsBatch.getBatchedShardRoutings(), allocation); } })); - return new BatchRunnableExecutor(runnables, () -> primaryShardsBatchGatewayAllocatorTimeout); + return new BatchRunnableExecutor(runnables, () -> primaryShardsBatchGatewayAllocatorTimeout) { + @Override + public void onComplete() { + logger.trace("Triggering onComplete after timeout for [{}] primary shards", timedOutPrimaryShardIds.size()); + primaryBatchShardAllocator.allocateUnassignedBatchOnTimeout(timedOutPrimaryShardIds, allocation, true); + } + }; } else { + Set<ShardId> timedOutReplicaShardIds = new HashSet<>(); batchIdToStoreShardBatch.values() .stream() .filter(batch -> batchesToAssign.contains(batch.batchId)) .forEach(batch -> runnables.add(new TimeoutAwareRunnable() { @Override public void onTimeout() { - replicaBatchShardAllocator.allocateUnassignedBatchOnTimeout(batch.getBatchedShardRoutings(), allocation, false); + timedOutReplicaShardIds.addAll(batch.getBatchedShards()); } @Override @@ -276,7 +315,13 @@ public void run() { replicaBatchShardAllocator.allocateUnassignedBatch(batch.getBatchedShardRoutings(), allocation); } })); - return new BatchRunnableExecutor(runnables, () -> replicaShardsBatchGatewayAllocatorTimeout); + return new BatchRunnableExecutor(runnables, () -> replicaShardsBatchGatewayAllocatorTimeout) { + @Override + public void onComplete() { + logger.trace("Triggering onComplete after timeout for [{}] replica shards", timedOutReplicaShardIds.size()); + replicaBatchShardAllocator.allocateUnassignedBatchOnTimeout(timedOutReplicaShardIds, allocation, false); + } + }; } } @@ -576,8 +621,37 @@ protected AsyncShardFetch.FetchResult
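The reworked executor above changes when timed-out batches are processed: each batch's onTimeout() now only records its shard IDs, and the overridden onComplete() hands all of them to allocateUnassignedBatchOnTimeout in a single call, instead of one call per batch. A self-contained sketch of this collect-then-flush pattern, under simplified invented interfaces rather than the actual BatchRunnableExecutor API:

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    // Collect-then-flush: work items that miss the deadline only record their IDs
    // in onTimeout(); a single pass at the end handles every timed-out item at once,
    // mirroring the overridden onComplete() above (interfaces invented for the sketch).
    final class CollectThenFlush {
        interface TimeoutAwareRunnable extends Runnable {
            void onTimeout();
        }

        public static void main(String[] args) {
            Set<String> timedOutIds = new HashSet<>();
            List<TimeoutAwareRunnable> runnables = new ArrayList<>();
            for (String batchId : List.of("batch-1", "batch-2", "batch-3")) {
                runnables.add(new TimeoutAwareRunnable() {
                    @Override
                    public void run() {
                        System.out.println("allocating " + batchId); // the normal path
                    }

                    @Override
                    public void onTimeout() {
                        timedOutIds.add(batchId); // record only; no allocation work here
                    }
                });
            }

            long deadlineNanos = System.nanoTime(); // zero budget, so later items time out
            for (TimeoutAwareRunnable r : runnables) {
                if (System.nanoTime() > deadlineNanos) {
                    r.onTimeout();
                } else {
                    r.run();
                }
            }
            // The single flush: one call covers all timed-out batches.
            System.out.println("handling " + timedOutIds.size() + " timed-out batches in one pass");
        }
    }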
diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java this.remoteWritableEntityStores.put( RemoteDiscoveryNodes.DISCOVERY_NODES, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteClusterBlocks.CLUSTER_BLOCKS, - new RemoteClusterStateBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteClusterStateCustoms.CLUSTER_STATE_CUSTOM, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); } @@ -80,7 +83,7 @@ public class RemoteClusterStateAttributesManager extends AbstractRemoteWritableE @Override protected ActionListener getWrappedWriteListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ) { return ActionListener.wrap( @@ -92,7 +95,7 @@ protected ActionListener getWrappedReadListener( @Override protected ActionListener getWrappedReadListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ) { return ActionListener.wrap( diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java index 8691187c7fbfa..02db15477ff95 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Strings; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.remote.RemoteRoutingTableService; import org.opensearch.cluster.service.ClusterApplierService; @@ -23,6 +22,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractAsyncTask; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.threadpool.ThreadPool; @@ -125,8 +125,8 @@ void cleanUpStaleFiles() { ClusterState currentAppliedState = clusterApplierService.state(); if (currentAppliedState.nodes().isLocalNodeElectedClusterManager()) { long cleanUpAttemptStateVersion = currentAppliedState.version(); - assert Strings.isNotEmpty(currentAppliedState.getClusterName().value()) : "cluster name is not set"; - assert Strings.isNotEmpty(currentAppliedState.metadata().clusterUUID()) : "cluster uuid is not set"; + assert !Strings.isNullOrEmpty(currentAppliedState.getClusterName().value()) : "cluster name is not set"; + assert !Strings.isNullOrEmpty(currentAppliedState.metadata().clusterUUID()) : "cluster uuid is not set"; if (cleanUpAttemptStateVersion - lastCleanupAttemptStateVersion > SKIP_CLEANUP_STATE_CHANGES) { logger.info( "Cleaning up stale remote state files for cluster [{}] with uuid [{}].
Last clean was done before {} updates", diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 674279f2251bd..910f601a81ca8 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -603,7 +603,7 @@ UploadedMetadataResults writeMetadataInParallel( new RemoteDiscoveryNodes( clusterState.nodes(), clusterState.version(), - clusterState.stateUUID(), + clusterState.metadata().clusterUUID(), blobStoreRepository.getCompressor() ), listener diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java index 5a6f4b7e9f1f1..763a8e3ff4951 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java @@ -16,8 +16,9 @@ import org.opensearch.cluster.metadata.Metadata.Custom; import org.opensearch.cluster.metadata.Metadata.XContentContext; import org.opensearch.cluster.metadata.TemplatesMetadata; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.AbstractRemoteWritableEntityManager; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -26,7 +27,6 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; import org.opensearch.gateway.remote.model.RemoteCoordinationMetadata; import org.opensearch.gateway.remote.model.RemoteCustomMetadata; import org.opensearch.gateway.remote.model.RemoteGlobalMetadata; @@ -85,72 +85,79 @@ public class RemoteGlobalMetadataManager extends AbstractRemoteWritableEntityMan this.namedWriteableRegistry = namedWriteableRegistry; this.remoteWritableEntityStores.put( RemoteGlobalMetadata.GLOBAL_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteCoordinationMetadata.COORDINATION_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemotePersistentSettingsMetadata.SETTING_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA, - 
new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteHashesOfConsistentSettings.HASHES_OF_CONSISTENT_SETTINGS, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteTemplatesMetadata.TEMPLATES_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.remoteWritableEntityStores.put( RemoteCustomMetadata.CUSTOM_METADATA, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); clusterSettings.addSettingsUpdateConsumer(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, this::setGlobalMetadataUploadTimeout); @@ -159,7 +166,7 @@ public class RemoteGlobalMetadataManager extends AbstractRemoteWritableEntityMan @Override protected ActionListener getWrappedWriteListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ) { return ActionListener.wrap( @@ -171,7 +178,7 @@ protected ActionListener getWrappedWriteListener( @Override protected ActionListener getWrappedReadListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ) { return ActionListener.wrap( diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java index c30721c8f625c..d1f08a7c2a33d 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java @@ -9,15 +9,15 @@ package org.opensearch.gateway.remote; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.AbstractRemoteWritableEntityManager; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; import org.opensearch.gateway.remote.model.RemoteIndexMetadata; import org.opensearch.gateway.remote.model.RemoteReadResult; import org.opensearch.index.translog.transfer.BlobStoreTransferService; @@ -58,12 +58,13 @@ 
public RemoteIndexMetadataManager( ) { this.remoteWritableEntityStores.put( RemoteIndexMetadata.INDEX, - new RemoteClusterStateBlobStore<>( + new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ) ); this.namedXContentRegistry = blobStoreRepository.getNamedXContentRegistry(); @@ -106,7 +107,7 @@ private void setIndexMetadataUploadTimeout(TimeValue newIndexMetadataUploadTimeo @Override protected ActionListener getWrappedWriteListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ) { return ActionListener.wrap( @@ -118,7 +119,7 @@ protected ActionListener getWrappedWriteListener( @Override protected ActionListener getWrappedReadListener( String component, - AbstractRemoteWritableBlobEntity remoteEntity, + AbstractClusterMetadataWriteableBlobEntity remoteEntity, ActionListener listener ) { return ActionListener.wrap( diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java index cb09de1a6ec44..0ccadd7dd18da 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java @@ -16,6 +16,7 @@ import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; @@ -23,7 +24,6 @@ import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest; -import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; import org.opensearch.gateway.remote.model.RemoteClusterStateManifestInfo; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.translog.transfer.BlobStoreTransferService; @@ -62,7 +62,7 @@ public class RemoteManifestManager { private volatile TimeValue metadataManifestUploadTimeout; private final String nodeId; - private final RemoteClusterStateBlobStore manifestBlobStore; + private final RemoteWriteableEntityBlobStore manifestBlobStore; private final Compressor compressor; private final NamedXContentRegistry namedXContentRegistry; // todo remove blobStorerepo from here @@ -78,12 +78,13 @@ public class RemoteManifestManager { ) { this.metadataManifestUploadTimeout = clusterSettings.get(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING); this.nodeId = nodeId; - this.manifestBlobStore = new RemoteClusterStateBlobStore<>( + this.manifestBlobStore = new RemoteWriteableEntityBlobStore<>( blobStoreTransferService, blobStoreRepository, clusterName, threadpool, - ThreadPool.Names.REMOTE_STATE_READ + ThreadPool.Names.REMOTE_STATE_READ, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN ); ; clusterSettings.addSettingsUpdateConsumer(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, this::setMetadataManifestUploadTimeout); diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java 
b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java index 9c5fbd5941640..101daaa143a66 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; @@ -29,7 +29,7 @@ /** * Wrapper class for uploading/downloading {@link ClusterBlocks} to/from remote blob store */ -public class RemoteClusterBlocks extends AbstractRemoteWritableBlobEntity { +public class RemoteClusterBlocks extends AbstractClusterMetadataWriteableBlobEntity { public static final String CLUSTER_BLOCKS = "blocks"; public static final ChecksumWritableBlobStoreFormat CLUSTER_BLOCKS_FORMAT = new ChecksumWritableBlobStoreFormat<>( diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java index acaae3173315a..5f79b690af574 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java @@ -9,7 +9,7 @@ package org.opensearch.gateway.remote.model; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -29,7 +29,7 @@ /** * Wrapper class for uploading/downloading {@link ClusterMetadataManifest} to/from remote blob store */ -public class RemoteClusterMetadataManifest extends AbstractRemoteWritableBlobEntity { +public class RemoteClusterMetadataManifest extends AbstractClusterMetadataWriteableBlobEntity { public static final String MANIFEST = "manifest"; public static final int SPLITTED_MANIFEST_FILE_LENGTH = 6; diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java index affbc7ba66cb8..e5e44525520f4 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java @@ -11,7 +11,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterState.Custom; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; @@ -32,7 +32,7 @@ /** * Wrapper class for uploading/downloading {@link Custom} to/from remote blob store */ -public class RemoteClusterStateCustoms extends 
AbstractRemoteWritableBlobEntity { +public class RemoteClusterStateCustoms extends AbstractClusterMetadataWriteableBlobEntity { public static final String CLUSTER_STATE_CUSTOM = "cluster-state-custom"; public final ChecksumWritableBlobStoreFormat clusterStateCustomsFormat; diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCoordinationMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCoordinationMetadata.java index a90721ab59f66..63cc96e3e02c4 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCoordinationMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCoordinationMetadata.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -31,7 +31,7 @@ /** * Wrapper class for uploading/downloading {@link CoordinationMetadata} to/from remote blob store */ -public class RemoteCoordinationMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteCoordinationMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final String COORDINATION_METADATA = "coordination"; public static final ChecksumBlobStoreFormat COORDINATION_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java index ec5dfbec820d4..8e850e903954a 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.metadata.Metadata.Custom; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; @@ -32,7 +32,7 @@ /** * Wrapper class for uploading/downloading {@link Custom} to/from remote blob store */ -public class RemoteCustomMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteCustomMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final String CUSTOM_METADATA = "custom"; public static final String CUSTOM_DELIMITER = "--"; diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java index fb399e2899cdd..446207a767009 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import 
org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; @@ -29,7 +29,7 @@ /** * Wrapper class for uploading/downloading {@link DiscoveryNodes} to/from remote blob store */ -public class RemoteDiscoveryNodes extends AbstractRemoteWritableBlobEntity { +public class RemoteDiscoveryNodes extends AbstractClusterMetadataWriteableBlobEntity { public static final String DISCOVERY_NODES = "nodes"; public static final ChecksumWritableBlobStoreFormat DISCOVERY_NODES_FORMAT = new ChecksumWritableBlobStoreFormat<>( diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java index 09f07de0d5c24..0082f873f8dba 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -25,7 +25,7 @@ /** * Wrapper class for uploading/downloading global metadata ({@link Metadata}) to/from remote blob store */ -public class RemoteGlobalMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteGlobalMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final String GLOBAL_METADATA = "global_metadata"; public static final ChecksumBlobStoreFormat GLOBAL_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java index 1debf75cdfec9..dee48237e5c4c 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.metadata.DiffableStringMap; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest; @@ -28,7 +28,7 @@ /** * Wrapper class for uploading/downloading {@link DiffableStringMap} to/from remote blob store */ -public class RemoteHashesOfConsistentSettings extends AbstractRemoteWritableBlobEntity { +public class RemoteHashesOfConsistentSettings extends AbstractClusterMetadataWriteableBlobEntity { public static final String HASHES_OF_CONSISTENT_SETTINGS = "hashes-of-consistent-settings"; public static final ChecksumWritableBlobStoreFormat HASHES_OF_CONSISTENT_SETTINGS_FORMAT = new ChecksumWritableBlobStoreFormat<>("hashes-of-consistent-settings", DiffableStringMap::readFrom); diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java 
b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java index 830b09b92e2cb..5308f92c633b1 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -29,7 +29,7 @@ /** * Wrapper class for uploading/downloading {@link IndexMetadata} to/from remote blob store */ -public class RemoteIndexMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteIndexMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 2; diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java index 9ee3db8f289e3..81042f289254c 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java @@ -9,7 +9,7 @@ package org.opensearch.gateway.remote.model; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.common.settings.Settings; import org.opensearch.core.compress.Compressor; @@ -31,7 +31,7 @@ /** * Wrapper class for uploading/downloading persistent {@link Settings} to/from remote blob store */ -public class RemotePersistentSettingsMetadata extends AbstractRemoteWritableBlobEntity { +public class RemotePersistentSettingsMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final String SETTING_METADATA = "settings"; diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemotePinnedTimestamps.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemotePinnedTimestamps.java new file mode 100644 index 0000000000000..030491cf8b7b9 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemotePinnedTimestamps.java @@ -0,0 +1,144 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote.model; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.common.remote.RemoteWriteableBlobEntity; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.compress.Compressor; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; + +/** + * Wrapper class for uploading/downloading {@link RemotePinnedTimestamps} to/from remote blob store + * + * @opensearch.internal + */ +public class RemotePinnedTimestamps extends RemoteWriteableBlobEntity { + private static final Logger logger = LogManager.getLogger(RemotePinnedTimestamps.class); + + /** + * Represents a collection of pinned timestamps and their associated pinning entities. + * This class is thread-safe and implements the Writeable interface for serialization. + */ + public static class PinnedTimestamps implements Writeable { + private final Map> pinnedTimestampPinningEntityMap; + + public PinnedTimestamps(Map> pinnedTimestampPinningEntityMap) { + this.pinnedTimestampPinningEntityMap = new ConcurrentHashMap<>(pinnedTimestampPinningEntityMap); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(pinnedTimestampPinningEntityMap, StreamOutput::writeLong, StreamOutput::writeStringCollection); + } + + public static PinnedTimestamps readFrom(StreamInput in) throws IOException { + return new PinnedTimestamps(in.readMap(StreamInput::readLong, StreamInput::readStringList)); + } + + /** + * Pins a timestamp against a pinning entity. + * + * @param timestamp The timestamp to pin. + * @param pinningEntity The entity pinning the timestamp. + */ + public void pin(Long timestamp, String pinningEntity) { + logger.debug("Pinning timestamp = {} against entity = {}", timestamp, pinningEntity); + pinnedTimestampPinningEntityMap.computeIfAbsent(timestamp, k -> new ArrayList<>()).add(pinningEntity); + } + + /** + * Unpins a timestamp for a specific pinning entity. + * + * @param timestamp The timestamp to unpin. + * @param pinningEntity The entity unpinning the timestamp. + */ + public void unpin(Long timestamp, String pinningEntity) { + logger.debug("Unpinning timestamp = {} against entity = {}", timestamp, pinningEntity); + if (pinnedTimestampPinningEntityMap.containsKey(timestamp) == false + || pinnedTimestampPinningEntityMap.get(timestamp).contains(pinningEntity) == false) { + logger.warn("Timestamp: {} is not pinned by entity: {}", timestamp, pinningEntity); + } + pinnedTimestampPinningEntityMap.compute(timestamp, (k, v) -> { + v.remove(pinningEntity); + return v.isEmpty() ? 
null : v; + }); + } + + public Map> getPinnedTimestampPinningEntityMap() { + return new HashMap<>(pinnedTimestampPinningEntityMap); + } + } + + public static final String PINNED_TIMESTAMPS = "pinned_timestamps"; + public static final ChecksumWritableBlobStoreFormat PINNED_TIMESTAMPS_FORMAT = new ChecksumWritableBlobStoreFormat<>( + PINNED_TIMESTAMPS, + PinnedTimestamps::readFrom + ); + + private PinnedTimestamps pinnedTimestamps; + + public RemotePinnedTimestamps(String clusterUUID, Compressor compressor) { + super(clusterUUID, compressor); + pinnedTimestamps = new PinnedTimestamps(new HashMap<>()); + } + + @Override + public BlobPathParameters getBlobPathParameters() { + return new BlobPathParameters(List.of(PINNED_TIMESTAMPS), PINNED_TIMESTAMPS); + } + + @Override + public String getType() { + return PINNED_TIMESTAMPS; + } + + @Override + public String generateBlobFileName() { + return this.blobFileName = String.join(DELIMITER, PINNED_TIMESTAMPS, RemoteStoreUtils.invertLong(System.currentTimeMillis())); + } + + @Override + public InputStream serialize() throws IOException { + return PINNED_TIMESTAMPS_FORMAT.serialize(pinnedTimestamps, generateBlobFileName(), getCompressor()).streamInput(); + } + + @Override + public PinnedTimestamps deserialize(InputStream inputStream) throws IOException { + return PINNED_TIMESTAMPS_FORMAT.deserialize(blobName, Streams.readFully(inputStream)); + } + + public void setBlobFileName(String blobFileName) { + this.blobFileName = blobFileName; + } + + public void setPinnedTimestamps(PinnedTimestamps pinnedTimestamps) { + this.pinnedTimestamps = pinnedTimestamps; + } + + public PinnedTimestamps getPinnedTimestamps() { + return pinnedTimestamps; + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStore.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStore.java index 7c4a5bf2236a1..1a28c97dc8a77 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStore.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStore.java @@ -9,10 +9,13 @@ package org.opensearch.gateway.remote.model; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; +import org.opensearch.common.remote.RemoteWriteableBlobEntity; import org.opensearch.common.remote.RemoteWriteableEntity; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; +import org.opensearch.gateway.remote.RemoteClusterStateUtils; import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable; import org.opensearch.index.remote.RemoteStoreEnums; import org.opensearch.index.remote.RemoteStorePathStrategy; @@ -28,8 +31,8 @@ * @param which can be uploaded to / downloaded from blob store * @param The concrete class implementing {@link RemoteWriteableEntity} which is used as a wrapper for IndexRoutingTable entity. */ -public class RemoteRoutingTableBlobStore> extends - RemoteClusterStateBlobStore { +public class RemoteRoutingTableBlobStore> extends + RemoteWriteableEntityBlobStore { /** * This setting is used to set the remote routing table store blob store path type strategy. 
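For the RemotePinnedTimestamps entity defined above, pin() and unpin() maintain a timestamp-to-holders map and drop a key once its last holder is removed; note that, as written, unpin() for a timestamp that was never pinned logs a warning and then still calls compute(), where the null list would throw. A short usage sketch of the map semantics, with invented timestamps and entity names:

    import java.util.HashMap;

    import org.opensearch.gateway.remote.model.RemotePinnedTimestamps.PinnedTimestamps;

    // Usage sketch for the timestamp-to-holders semantics of PinnedTimestamps.
    final class PinnedTimestampsExample {
        public static void main(String[] args) {
            PinnedTimestamps pinned = new PinnedTimestamps(new HashMap<>());

            pinned.pin(1_700_000_000L, "snapshot-A");
            pinned.pin(1_700_000_000L, "snapshot-B"); // two holders of the same timestamp

            pinned.unpin(1_700_000_000L, "snapshot-A");
            // Key survives: snapshot-B still holds it.
            System.out.println(pinned.getPinnedTimestampPinningEntityMap()); // {1700000000=[snapshot-B]}

            pinned.unpin(1_700_000_000L, "snapshot-B");
            // Last holder removed: compute() returned null, so the key is gone.
            System.out.println(pinned.getPinnedTimestampPinningEntityMap()); // {}
        }
    }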
@@ -66,7 +69,14 @@ public RemoteRoutingTableBlobStore( String executor, ClusterSettings clusterSettings ) { - super(blobStoreTransferService, blobStoreRepository, clusterName, threadPool, executor); + super( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadPool, + executor, + RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN + ); this.pathType = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING); this.pathHashAlgo = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING); clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING, this::setPathTypeSetting); @@ -74,7 +84,7 @@ public RemoteRoutingTableBlobStore( } @Override - public BlobPath getBlobPathForUpload(final AbstractRemoteWritableBlobEntity obj) { + public BlobPath getBlobPathForUpload(final RemoteWriteableBlobEntity obj) { assert obj.getBlobPathParameters().getPathTokens().size() == 1 : "Unexpected tokens in RemoteRoutingTableObject"; BlobPath indexRoutingPath = getBlobPathPrefix(obj.clusterUUID()).add(INDEX_ROUTING_TABLE); diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteStorePinnedTimestampsBlobStore.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteStorePinnedTimestampsBlobStore.java new file mode 100644 index 0000000000000..2a65dd993d0af --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteStorePinnedTimestampsBlobStore.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.remote.RemoteWriteableBlobEntity; +import org.opensearch.common.remote.RemoteWriteableEntityBlobStore; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; + +/** + * Extends the RemoteClusterStateBlobStore to support {@link RemotePinnedTimestamps} + */ +public class RemoteStorePinnedTimestampsBlobStore extends RemoteWriteableEntityBlobStore< + RemotePinnedTimestamps.PinnedTimestamps, + RemotePinnedTimestamps> { + + public static final String PINNED_TIMESTAMPS_PATH_TOKEN = "pinned_timestamps"; + private final BlobStoreRepository blobStoreRepository; + + public RemoteStorePinnedTimestampsBlobStore( + BlobStoreTransferService blobStoreTransferService, + BlobStoreRepository blobStoreRepository, + String clusterName, + ThreadPool threadPool, + String executor + ) { + super(blobStoreTransferService, blobStoreRepository, clusterName, threadPool, executor, PINNED_TIMESTAMPS_PATH_TOKEN); + this.blobStoreRepository = blobStoreRepository; + } + + @Override + public BlobPath getBlobPathForUpload(final RemoteWriteableBlobEntity obj) { + return blobStoreRepository.basePath().add(PINNED_TIMESTAMPS_PATH_TOKEN); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java index 4513d35aef5e8..6ae8a533f9a21 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.metadata.TemplatesMetadata; 
import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -31,7 +31,7 @@ /** * Wrapper class for uploading/downloading {@link TemplatesMetadata} to/from remote blob store */ -public class RemoteTemplatesMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteTemplatesMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final String TEMPLATES_METADATA = "templates"; diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java index fd0526f05d015..d4f3837f80084 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java @@ -9,7 +9,7 @@ package org.opensearch.gateway.remote.model; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.common.settings.Settings; import org.opensearch.core.compress.Compressor; @@ -32,7 +32,7 @@ /** * Wrapper class for uploading/downloading transient {@link Settings} to/from remote blob store */ -public class RemoteTransientSettingsMetadata extends AbstractRemoteWritableBlobEntity { +public class RemoteTransientSettingsMetadata extends AbstractClusterMetadataWriteableBlobEntity { public static final String TRANSIENT_SETTING_METADATA = "transient-settings"; diff --git a/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java index 40b5bafde2b13..46c5074c48eb8 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java +++ b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java @@ -10,7 +10,7 @@ import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.core.index.Index; @@ -27,7 +27,7 @@ /** * Remote store object for IndexRoutingTable */ -public class RemoteIndexRoutingTable extends AbstractRemoteWritableBlobEntity { +public class RemoteIndexRoutingTable extends AbstractClusterMetadataWriteableBlobEntity { public static final String INDEX_ROUTING_TABLE = "index-routing"; public static final String INDEX_ROUTING_METADATA_PREFIX = "indexRouting--"; diff --git a/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java index e876d939490d0..2370417dc14df 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java +++ 
b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java @@ -12,7 +12,7 @@ import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTableIncrementalDiff; import org.opensearch.common.io.Streams; -import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; import org.opensearch.common.remote.BlobPathParameters; import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest; @@ -30,7 +30,7 @@ * Represents a incremental difference between {@link org.opensearch.cluster.routing.RoutingTable} objects that can be serialized and deserialized. * This class is responsible for writing and reading the differences between RoutingTables to and from an input/output stream. */ -public class RemoteRoutingTableDiff extends AbstractRemoteWritableBlobEntity { +public class RemoteRoutingTableDiff extends AbstractClusterMetadataWriteableBlobEntity { private final RoutingTableIncrementalDiff routingTableIncrementalDiff; private long term; diff --git a/server/src/main/java/org/opensearch/http/HttpChannel.java b/server/src/main/java/org/opensearch/http/HttpChannel.java index ed20ec89a9099..7048f08faff9f 100644 --- a/server/src/main/java/org/opensearch/http/HttpChannel.java +++ b/server/src/main/java/org/opensearch/http/HttpChannel.java @@ -77,7 +77,7 @@ default void handleException(Exception ex) {} /** * Returns the contextual property associated with this specific HTTP channel (the - * implementation of how such properties are managed depends on the the particular + * implementation of how such properties are managed depends on the particular * transport engine). * * @param name the name of the property diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 93ff1b78b1ac5..dc1bf94662385 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -97,8 +97,8 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -109,8 +109,6 @@ import java.util.function.Function; import java.util.function.Supplier; -import static org.apache.logging.log4j.util.Strings.toRootUpperCase; - /** * IndexModule represents the central extension point for index level custom implementations like: *
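The IndexModule hunk above drops log4j's toRootUpperCase static import and pulls in java.util.Locale, presumably so the upper-casing goes through the JDK's Locale.ROOT overload instead. A minimal illustration of why the explicit locale matters, with an invented example value:

    import java.util.Locale;

    // Locale-explicit upper-casing: Locale.ROOT gives stable results regardless of the
    // JVM default locale (e.g. under a Turkish locale, "index".toUpperCase() would
    // produce a dotted capital I and not equal "INDEX").
    final class RootUpperCase {
        public static void main(String[] args) {
            String storeType = "fs"; // invented example value
            System.out.println(storeType.toUpperCase(Locale.ROOT)); // always "FS"
        }
    }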